diff --git a/.vscode/settings.json b/.vscode/settings.json index 1955a10..8ae7e59 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -6,5 +6,6 @@ ], "settings": { "asciidoc.antora.enableAntoraSupport": true - } + }, + "asciidoc.antora.enableAntoraSupport": true } \ No newline at end of file diff --git a/content/modules/ROOT/assets/images/00-compliance-1.png b/content/modules/ROOT/assets/images/00-compliance-1.png index 284a30e..58c50cf 100644 Binary files a/content/modules/ROOT/assets/images/00-compliance-1.png and b/content/modules/ROOT/assets/images/00-compliance-1.png differ diff --git a/content/modules/ROOT/assets/images/00-compliance-3.png b/content/modules/ROOT/assets/images/00-compliance-3.png index 6180740..2433bba 100644 Binary files a/content/modules/ROOT/assets/images/00-compliance-3.png and b/content/modules/ROOT/assets/images/00-compliance-3.png differ diff --git a/content/modules/ROOT/assets/images/00-config-1.png b/content/modules/ROOT/assets/images/00-config-1.png index e95ca0f..d95f1d3 100644 Binary files a/content/modules/ROOT/assets/images/00-config-1.png and b/content/modules/ROOT/assets/images/00-config-1.png differ diff --git a/content/modules/ROOT/assets/images/00-network-1.png b/content/modules/ROOT/assets/images/00-network-1.png index c3b4c91..417d81e 100644 Binary files a/content/modules/ROOT/assets/images/00-network-1.png and b/content/modules/ROOT/assets/images/00-network-1.png differ diff --git a/content/modules/ROOT/assets/images/00-network-2.png b/content/modules/ROOT/assets/images/00-network-2.png index 6220173..f7756eb 100644 Binary files a/content/modules/ROOT/assets/images/00-network-2.png and b/content/modules/ROOT/assets/images/00-network-2.png differ diff --git a/content/modules/ROOT/assets/images/00-network-3.png b/content/modules/ROOT/assets/images/00-network-3.png index 424481c..45b3fa5 100644 Binary files a/content/modules/ROOT/assets/images/00-network-3.png and 
b/content/modules/ROOT/assets/images/00-network-3.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-1.png b/content/modules/ROOT/assets/images/00-pc-1.png index 228f9ea..cbfc9c8 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-1.png and b/content/modules/ROOT/assets/images/00-pc-1.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-2.png b/content/modules/ROOT/assets/images/00-pc-2.png index 0c828ae..035fe2b 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-2.png and b/content/modules/ROOT/assets/images/00-pc-2.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-3.png b/content/modules/ROOT/assets/images/00-pc-3.png index d988075..7c65772 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-3.png and b/content/modules/ROOT/assets/images/00-pc-3.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-4.png b/content/modules/ROOT/assets/images/00-pc-4.png index 14d17dc..2e45f37 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-4.png and b/content/modules/ROOT/assets/images/00-pc-4.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-5.png b/content/modules/ROOT/assets/images/00-pc-5.png index 9f2996b..c18f3f1 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-5.png and b/content/modules/ROOT/assets/images/00-pc-5.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-6.png b/content/modules/ROOT/assets/images/00-pc-6.png index 2fb0018..e01ee7b 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-6.png and b/content/modules/ROOT/assets/images/00-pc-6.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-7.png b/content/modules/ROOT/assets/images/00-pc-7.png index c51e442..463322b 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-7.png and b/content/modules/ROOT/assets/images/00-pc-7.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-8.png b/content/modules/ROOT/assets/images/00-pc-8.png 
index 2c0ecbd..0119579 100644 Binary files a/content/modules/ROOT/assets/images/00-pc-8.png and b/content/modules/ROOT/assets/images/00-pc-8.png differ diff --git a/content/modules/ROOT/assets/images/00-pc-9.png b/content/modules/ROOT/assets/images/00-pc-9.png new file mode 100644 index 0000000..d7aa0d2 Binary files /dev/null and b/content/modules/ROOT/assets/images/00-pc-9.png differ diff --git a/content/modules/ROOT/assets/images/00-risk-1.png b/content/modules/ROOT/assets/images/00-risk-1.png index 167de5a..25f52b2 100644 Binary files a/content/modules/ROOT/assets/images/00-risk-1.png and b/content/modules/ROOT/assets/images/00-risk-1.png differ diff --git a/content/modules/ROOT/assets/images/00-risk-2.png b/content/modules/ROOT/assets/images/00-risk-2.png index 7c3bf08..9b4927c 100644 Binary files a/content/modules/ROOT/assets/images/00-risk-2.png and b/content/modules/ROOT/assets/images/00-risk-2.png differ diff --git a/content/modules/ROOT/assets/images/00-vuln-1.png b/content/modules/ROOT/assets/images/00-vuln-1.png index 781ee58..75bea8f 100644 Binary files a/content/modules/ROOT/assets/images/00-vuln-1.png and b/content/modules/ROOT/assets/images/00-vuln-1.png differ diff --git a/content/modules/ROOT/assets/images/01-compliance-4.png b/content/modules/ROOT/assets/images/01-compliance-4.png new file mode 100644 index 0000000..e6d50e5 Binary files /dev/null and b/content/modules/ROOT/assets/images/01-compliance-4.png differ diff --git a/content/modules/ROOT/assets/images/01-compliance-5.png b/content/modules/ROOT/assets/images/01-compliance-5.png new file mode 100644 index 0000000..8b0d91c Binary files /dev/null and b/content/modules/ROOT/assets/images/01-compliance-5.png differ diff --git a/content/modules/ROOT/assets/images/01-compliance-6.png b/content/modules/ROOT/assets/images/01-compliance-6.png new file mode 100644 index 0000000..390bd9e Binary files /dev/null and b/content/modules/ROOT/assets/images/01-compliance-6.png differ diff --git 
a/content/modules/ROOT/assets/images/01-violations-1.png b/content/modules/ROOT/assets/images/01-violations-1.png index 9f67319..d188d81 100644 Binary files a/content/modules/ROOT/assets/images/01-violations-1.png and b/content/modules/ROOT/assets/images/01-violations-1.png differ diff --git a/content/modules/ROOT/assets/images/acs-vuln-dashboard-00.png b/content/modules/ROOT/assets/images/acs-vuln-dashboard-00.png index 14e82a4..f80473a 100644 Binary files a/content/modules/ROOT/assets/images/acs-vuln-dashboard-00.png and b/content/modules/ROOT/assets/images/acs-vuln-dashboard-00.png differ diff --git a/content/modules/ROOT/assets/images/acs-vuln-dashboard-01.png b/content/modules/ROOT/assets/images/acs-vuln-dashboard-01.png index 93fc265..0b4c11c 100644 Binary files a/content/modules/ROOT/assets/images/acs-vuln-dashboard-01.png and b/content/modules/ROOT/assets/images/acs-vuln-dashboard-01.png differ diff --git a/content/modules/ROOT/assets/images/acs-vuln-dashboard-02.png b/content/modules/ROOT/assets/images/acs-vuln-dashboard-02.png index 8238669..cdf25d2 100644 Binary files a/content/modules/ROOT/assets/images/acs-vuln-dashboard-02.png and b/content/modules/ROOT/assets/images/acs-vuln-dashboard-02.png differ diff --git a/content/modules/ROOT/assets/images/quay-scan-hover.png b/content/modules/ROOT/assets/images/quay-scan-hover.png index 5edd25b..659c9b2 100644 Binary files a/content/modules/ROOT/assets/images/quay-scan-hover.png and b/content/modules/ROOT/assets/images/quay-scan-hover.png differ diff --git a/content/modules/ROOT/assets/images/quay-tags.png b/content/modules/ROOT/assets/images/quay-tags.png index 3f80ac9..86ecea2 100644 Binary files a/content/modules/ROOT/assets/images/quay-tags.png and b/content/modules/ROOT/assets/images/quay-tags.png differ diff --git a/content/modules/ROOT/nav.adoc b/content/modules/ROOT/nav.adoc index 9ac0c83..9214ccb 100644 --- a/content/modules/ROOT/nav.adoc +++ b/content/modules/ROOT/nav.adoc @@ -2,7 +2,7 @@ * 
xref:01-visibility-and-navigation.adoc[1. Visibility & Navigation] * xref:02-vulnerability-management-lab.adoc[2. Vulnerability Management] * xref:03-risk-profiling.adoc[3. Risk profiling] -* xref:04-policy-management.adoc[4.Policy Management] +* xref:04-policy-management.adoc[4. Policy Management] * xref:05-cicd-and-automation.adoc[5. CI/CD Automation and Integration] * xref:06-compliance.adoc[6. Compliance] * xref:07-notifications.adoc[7. Notifications and Alerting] @@ -11,7 +11,6 @@ * xref:10-installation.adoc[10. Installation] * xref:misc-log-4-shell-lab.adoc[Black Hat - log4shell Example] -// * xref:misc-reverse-shell.adoc[Black Hat - reverse shell runtime Example] * xref:misc-hacking-linux.adoc[Black Hat - CTF - hack a web application] * xref:partner-paladin.adoc[Partner - Paladin Cloud & RHACS Integration] diff --git a/content/modules/ROOT/pages/00-setup-install-navigation.adoc b/content/modules/ROOT/pages/00-setup-install-navigation.adoc index ff6414e..cf4e03a 100644 --- a/content/modules/ROOT/pages/00-setup-install-navigation.adoc +++ b/content/modules/ROOT/pages/00-setup-install-navigation.adoc @@ -76,17 +76,13 @@ image::01-rhacs-login.png[RHACS console] image::01-rhacs-console-dashboard.png[RHACS console] -==== -Congrats! Half way there. -==== - === OpenShift admin access verification OpenShift admin access verification involves ensuring that users have the appropriate permissions and roles assigned to them for managing the OpenShift cluster. This can be done by checking the user roles and bindings within the cluster. You'll be verifying your permissions using the oc command-line tool. There are *TWO clusters* we need to verify access too. 
-==== Verify access to the EKS cluster +*Verify access to the EKS cluster* [source,sh,subs="attributes",role=execute] ---- @@ -121,7 +117,7 @@ ip-.us-east-2.compute.internal Ready 163m v1.28.8-ek IMPORTANT: We should not have access with the *oc* command as it is an OpenShift command but you can see the EKS nodes and their information. -==== Verify access to the OpenShift cluster +*Verify access to the OpenShift cluster* Next, let's switch to the OpenShift cluster running and do our work (for now) in the OpenShift cluster @@ -168,7 +164,7 @@ NAME STATUS ROLES AGE You will now see the OCP role using the *oc* command, as we are currently working on the OpenShift cluster -IMPORTANT: We will be working with the OpenShift cluster in all modules unless otherwise specified. +NOTE: We will be working with the OpenShift cluster in all modules unless otherwise specified. === roxctl CLI verification @@ -224,7 +220,7 @@ Access: rw WorkflowAdministration ---- -NOTE: This output is showing that you have unrestricted access to the RHACS product. these permissions can be seen in the RHACS Access Control tab that we will review later. +NOTE: This output is showing that you have unrestricted access to the RHACS product. these permissions can be seen in the **RHACS Access Control** tab that we will review later. image::01-rhacs-access-control.png[RHACS access control] @@ -238,7 +234,7 @@ You now have access to the core apps. Next, you'll deploy insecure apps into the === Build a container image -In this section, we will download the "Java app," give it a new tag, and push the image to Quay. Later, we'll deploy the image to the OpenShift Cluster and use it in future modules. +In this section, we will download the "*Java app*" give it a new tag, and push the image to Quay. Later, we'll deploy the image to the OpenShift Cluster and use it in future modules. Let's export a few variables to make things easier. 
These variables will stay in the .bashrc file so they're saved in case you need to refresh the terminal. @@ -247,6 +243,7 @@ TIP: With the variables saved in the ~/.bashrc file you will not have to declare [source,sh,subs="attributes",role=execute] ---- echo export QUAY_USER={quay_admin_username} >> ~/.bashrc +QUAY_USER={quay_admin_username} ---- [start=2] @@ -256,6 +253,7 @@ echo export QUAY_USER={quay_admin_username} >> ~/.bashrc [source,sh,subs="attributes",role=execute] ---- echo export QUAY_URL=$(oc -n quay-enterprise get route quay-quay -o jsonpath='{.spec.host}') >> ~/.bashrc +QUAY_URL=$(oc -n quay-enterprise get route quay-quay -o jsonpath='{.spec.host}') ---- IMPORTANT: Verify that the variables are correct @@ -436,12 +434,12 @@ In this tab you can add/remove users and update permissions, alter the privacy o image::quay-settings.png[link=self, window=blank, width=100%] [start=6] -. Make your repository public before deploying our application in the next step by clicking the *Make Public* button under `Repository Visability` - -IMPORTANT: Make sure to make the repository public. Otherwise we will not be able to deploy the application in the next step. +. Make your repository public before deploying our application in the next step by clicking the *Make Public* button under `Repository Visibility` image::quay-make-public.png[link=self, window=blank, width=100%] +IMPORTANT: Make sure to make the repository public. Otherwise we will not be able to deploy the application in the next step. + [start=7] . Click OK @@ -486,7 +484,7 @@ Congratulations, you now know how to examine images in your registry for potenti == Deploy the workshop applications -n the final part of this module, you'll deploy several insecure applications to the OpenShift cluster. You'll scan a few of these containers using the *roxctl* CLI to understand what you're deploying and what to expect when you dive into RHACS. 
+In the final part of this module, you'll deploy several insecure applications to the OpenShift cluster. You'll scan a few of these containers using the *roxctl* CLI to understand what you're deploying and what to expect when you dive into RHACS. IMPORTANT: Make sure the variables are set before running the following commands. If not, go back to the Quay section to redo the previous commands. @@ -598,16 +596,41 @@ image::https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExNnJoNHE2MXhocm52ZzFmeHVy Run roxctl against a few of your favorite container images. Try pulling from link:https://hub.docker.com/[docker hub] or link:https://quay.io/[quay.io]. Try modifying the command below to include your image of choice. +For example: + +[.console-output] +[source,bash,subs="+macros,+attributes"] +---- +[lab-user@bastion ~]$ MYIMAGE=docker.io/ubuntu +[lab-user@bastion ~]$ roxctl --insecure-skip-tls-verify -e "$ROX_CENTRAL_ADDRESS:443" image scan --image=$MYIMAGE --force -o table --severity=CRITICAL +---- + +[.console-output] +[source,bash,subs="+macros,+attributes"] +---- +Scan results for image: docker.io/ubuntu +(TOTAL-COMPONENTS: 0, TOTAL-VULNERABILITIES: 0, LOW: 0, MODERATE: 0, IMPORTANT: 0, CRITICAL: 0) + ++-----------+---------+-----+----------+------+---------------+ +| COMPONENT | VERSION | CVE | SEVERITY | LINK | FIXED VERSION | ++-----------+---------+-----+----------+------+---------------+ ++-----------+---------+-----+----------+------+---------------+ +---- +Showing that the latest version of Ubuntu from Docker.io has 0 critical vulnerabilities. 
+ +*Your turn* + [source,sh,subs="attributes",role=execute] ---- -roxctl --insecure-skip-tls-verify -e "$ROX_CENTRAL_ADDRESS:443" image scan --image=$QUAY_URL/$QUAY_USER/ctf-web-to-system:1.0 --force -o table --severity=CRITICAL +MYIMAGE= +roxctl --insecure-skip-tls-verify -e "$ROX_CENTRAL_ADDRESS:443" image scan $MYIMAGE --force -o table --severity=CRITICAL ---- == Summary image::https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExbnY0NDA0ZnJqNXh6cGNqeHNxZGd5Zm5qMnlpOHhrbm1hY2pwcG5ydSZlcD12MV9pbnRlcm5hbF9naWZfYnlfaWQmY3Q9Zw/p18ohAgD3H60LSoI1C/giphy.gif[link=self, window=blank, width=100%, class="center"] -Beautiful! +*Beautiful!* In this module, you got access to all of the lab UI's and interfaces including the Showroom lab enviroment (Where you are reading this sentence). You downloaded and deployed some very insecure applications and setup the lab full of examples to dive into. diff --git a/content/modules/ROOT/pages/01-visibility-and-navigation.adoc b/content/modules/ROOT/pages/01-visibility-and-navigation.adoc index 3b76caf..0b49900 100644 --- a/content/modules/ROOT/pages/01-visibility-and-navigation.adoc +++ b/content/modules/ROOT/pages/01-visibility-and-navigation.adoc @@ -133,15 +133,15 @@ As you can see, the content is only relevant to the violations tab, but the sear === 2. Navigation menu -image::01-acs-nav-01.png[link=self, window=blank, width=100%, Navigation Menu] +image::acs-nav-01.png[link=self, window=blank, width=100%, Navigation Menu] The left-hand navigation menu provides access to each of the security use cases, as well as product configuration to integrate RHACS with your existing tooling. 
The navigation menu has the following items: - *Dashboard:* Summary view of your environment -- *Network Graph:* Configured and actual network flows and the creation of Network Policies to implement network segmentation -- *Violations:* Events that do not match the defined security policies -- *Compliance:* Our new compliance dashboard (Next gen compliance) update is in progress. With plans to make compliance reporting even easier with tailored compliance profiles and reporting by application, namespace and cluster. -- *Vulnerability Management:* Over the past year, we've revamped our Vulnerability Management process, focusing on filtering important and critical issues. We've also introduced node, platform, and workload-specific vulnerability dashboards. This segmentation helps the operations team quickly identify where a vulnerability exists and determine which team to contact, making the process much more efficient. +- *Network Graph:* Real time network flows and public ports that are available. Enables to automated creation of Network Policies to implement network segmentation +- *Violations:* Events that are in violation of the default and your defined security policies +- *Compliance:* Our new compliance dashboard (Self titled *NextGen Compliance*) update is in progress. With plans to make compliance reporting even easier with tailored compliance profiles and reporting by application, namespace and cluster. +- *Vulnerability Management:* Over the past year, we've revamped our Vulnerability Management workflows, focusing on filtering important and critical issues. We've also introduced node, platform, and workload-specific vulnerability dashboards. This segmentation helps the operations team quickly identify where a vulnerability exists and determine which team to contact, making the process much more efficient. 
- *Configuration Management:* The configuration management tab enables you to identify potential misconfigurations that can lead to security issues - *Risk:* The Risk tab points out major risky applications by using configuration, runtime, and vulnerability data, helping you focus on the "high-risk" workloads. - *Platform Configuration:* RHACS configuration, policy management and integration details, including; @@ -149,6 +149,7 @@ The left-hand navigation menu provides access to each of the security use cases, * Policy Management * Collections * Integrations +* Exception Configuration * Access Control * System Configuration * Administration Events @@ -215,7 +216,7 @@ The network graph tab allows you to visualize all the network connections in you image::00-network-2.png[link=self, window=blank, width=100%, Dashboard Filter] -The listening endpoints tab allows you to see all of the deployments across all of your clusters and audit for any reported listening endpoints as you drill down through cluster namespace and into deployments, you will see the exact process ID Port protocol pod ID and container name and if they are exposed. +With the *Listening Endpoints tab*, you can see all of the deployments across your clusters and audit for any reported listening endpoints. As you drill down through cluster namespace and into deployments, you will see the exact process ID, Port protocol pod ID, container name and whether they are exposed. image::00-network-3.png[link=self, window=blank, width=100%, Dashboard Filter] @@ -240,57 +241,72 @@ image::01-violations-1.png[link=self, window=blank, width=100%] Don't worry, you'll go through this policy violation workflow in later modules. -=== Compliance (Next Gen Dashboard) +=== Compliance (Next-Gen Dashboard) Red Hat Advanced Cluster Security for Kubernetes supports OpenShift Container Platform configuration compliance standards through an integration with the OpenShift Container Platform Compliance Operator. 
In addition, it allows you to measure and report on configuration security best practices for OpenShift and supported Kubernetes platforms. The OpenShift Compliance Operator allows OpenShift Container Platform administrators to define the desired compliance state of a cluster and provides an overview of gaps and ways to remediate any non-compliant policy. We will be installing and managing the compliance operator in later modules -image::00-compliance-1.png[link=self, window=blank, width=100%] +image::01-compliance-1.png[link=self, window=blank, width=100%] -The Compliance 2.0 tab is in tech preview this was just released in ACS 4.4, and we are currently in the process of migrating The existing 1.0 dashboard into 2.0 +The Compliance dropdown has three tabs to choose from. -=== Compliance 1.0 +- Coverage +- Schedules +- Dashboard -The Compliance 1.0 dashboard should be empty when you're in here for the first time. It's because you have not completed a scan. +---- +Coverage +---- -.Procedure +The *Coverage* tab will contain all of your scanned clusters and workloads after the *Compliance Operator* is setup. You will go through this in the Compliance section later in the roadshow. -. We will go into this in a later module, but for now, hit the *Scan environment* button in the top right of the page to kick off your first scan. +image::01-compliance-2.png[link=self, window=blank, width=100%] -image::00-compliance-2.png[link=self, window=blank, width=100%] +---- +Schedules +---- -image::00-compliance-3.png[link=self, window=blank, width=100%] +The schedules tab contains all of the created scan schedules that report on the various CIS, PCI-DSS and DISA-STIG compliance standards you wish to evaluate. -[start=2] +image::01-compliance-3.png[link=self, window=blank, width=100%] -. Ensure you see the bar graphs fill up with data before moving. 
We we will dissthesethis compliance results in a later module +---- +Compliance Dashboard (Previously Compliance 1.0) +---- -image::00-compliance-4.png[link=self, window=blank, width=100%] +The Compliance dashboard should be empty when you're in here for the first time. It's because you have not completed a scan. -=== Vulnerability Management 2.0 +.Procedure -Next, we have the vulnerability management 2.0 tab, similar to the compliance 2.0 tab vulnerability management is currently getting an overhaul. +. We will go into this in a later module, but for now, hit the *Scan environment* button in the top right of the page to kick off your first scan. -image::00-vuln-1.png[link=self, window=blank, width=100%] +image::01-compliance-4.png[link=self, window=blank, width=100%] -The vulnerability management 2.0 tab contains a workload cve Tab and a vulnerability reporting tab with the workload CV tab currently in Tech preview +image::01-compliance-5.png[link=self, window=blank, width=100%] -image::00-vuln-2.png[link=self, window=blank, width=100%] +[start=2] + +. Ensure you see the bar graphs fill up with data before moving. We we will review these compliance results in a later module. -Feel free to click around however we will be exploring the vulnerability management section in the upcoming module +image::01-compliance-6.png[link=self, window=blank, width=100%] -=== Vulnerability Management 1.0 +=== Vulnerability Management -The Vulnerability Management 1.0 tab has the original vulnerability management dashboard and the risk acceptance workflow. +Next, we have the *Vulnerability Management* tab, which has been overhauled for greater visibility and efficient filtering. -image::00-vuln-3.png[link=self, window=blank, width=100%] +image::00-vuln-1.png[link=self, window=blank, width=100%] -The underlying vulnerability data is the same as how we display, categorize and show it to the user. 
It is essential to to manage vulnerabilities at scale and make them actionable, which is a high priority in ACS. This is why we're prioritizing the vulnerability management workflow and eventually moving all features and functionality into a single tab +The Vulnerability Management tab contains *a lot* of information including -image::00-vuln-4.png[link=self, window=blank, width=100%] +- Workload CVEs +- Exception Management +- Vulnerability Reporting +- Platform CVEs +- Node CVEs +- Dashboard (Deprecated) -Again, feel free to click around however we will be exploring the vulnerability management section in the upcoming module. +Feel free to click around, and get your questions ready as the upcoming module will be exploring the Vulnerability Management tab in detail. === Configuration Management @@ -298,7 +314,6 @@ The Configuration Management tab contains a bunch of information about the secur image::00-config-1.png[link=self, window=blank, width=100%] - This information includes: - Policy violations by severity @@ -308,7 +323,7 @@ This information includes: === Risk -Another risk tab is a combination of security configuration management Network detection run time and incident response and vulnerability management all coming together so that users can gain a greater context and prioritize security issues throughout OpenShift and Kubernetes clusters +The *Risk tab* is a combination of security configuration management, Network detection, runtime information, incident response, and vulnerability management all coming together so that users can gain a greater context and prioritize security issues throughout OpenShift and Kubernetes clusters. 
image::00-risk-1.png[link=self, window=blank, width=100%] @@ -349,27 +364,33 @@ The integration tab holds all of your options for: - Cloud Source Integrations - and authentication tokens -==== Access Control +==== Exception Configuration + +Configure exception behavior for vulnerabilities image::00-pc-5.png[link=self, window=blank, width=100%] +==== Access Control + +image::00-pc-6.png[link=self, window=blank, width=100%] + The access control tab is where you set up your authentication providers and the roles that you would like to have an ACS, along with permission sets and access scopes. ==== System Configuration -image::00-pc-6.png[link=self, window=blank, width=100%] +image::00-pc-7.png[link=self, window=blank, width=100%] The system configuration tab manages things like private data retention configuration cluster deletion public configuration and all of these are settings are editable ==== Administration Events -image::00-pc-7.png[link=self, window=blank, width=100%] +image::00-pc-8.png[link=self, window=blank, width=100%] The administration events tab is handy for troubleshooting platform issues by reviewing event logs now, these logs are approached after four days by default, but you can change that in the system configuration tab. We will have a whole section on the administration events later, but it is beneficial for diagnosing issues and looking into domains such as authentication image scanning Integrations and more ==== System Health -image::00-pc-8.png[link=self, window=blank, width=100%] +image::00-pc-9.png[link=self, window=blank, width=100%] And lastly, the system Health Tap will help you handle things like Administration usage, generate diagnostic bundles that you monitor cluster status sensor upgrades, credential expiration, and more. @@ -379,4 +400,4 @@ image::https://media.giphy.com/media/v1.Y2lkPTc5MGI3NjExcTYwZWZlazBoanhlcXV3Njkx Nice job! -In this module, you learned how to navigate the ACS dashboard and perform basic search queries. 
You have the navigational basics to move through the ACI UI proficiently. On to *Vulnerability Management*!! \ No newline at end of file +In this module, you learned how to navigate the ACS dashboard and perform basic search queries. You have the navigational basics to move through the ACS UI proficiently. On to *Vulnerability Management*!! \ No newline at end of file diff --git a/content/modules/ROOT/pages/02-vulnerability-management-lab.adoc b/content/modules/ROOT/pages/02-vulnerability-management-lab.adoc index e48202c..966c246 100644 --- a/content/modules/ROOT/pages/02-vulnerability-management-lab.adoc +++ b/content/modules/ROOT/pages/02-vulnerability-management-lab.adoc @@ -6,6 +6,8 @@ * Set and manage risk acceptance workflows * Create a simple report to email to stakeholders +IMPORTANT: The locations and size of your panels may vary depending on your screen size and zoom. + == Introduction to vulnerability management in RHACS You start with the topic of vulnerability management because it is familiar to most security teams, even those without prior experience with containers or Kubernetes. The vulnerability management process helps protect the software supply chain and prevent known vulnerabilities from being used as an entry point into your applications. @@ -75,15 +77,14 @@ Key components of RHSA: Find out more at the link: https://access.redhat.com/articles/explaining_redhat_errata[Red Hat website^] -=== The Vulnerability Management (1.0) dashboard - -Let us continue by looking at our primary use case for RHACS, which is the Vulnerability Management features and dashboard, a familiar topic for most security teams. +=== The Vulnerability Management dashboard -IMPORTANT: The locations and size of your panels may vary depending on your screen size and zoom. +Let's continue by looking at our customers primary use case for RHACS, which is the Vulnerability Management features and dashboard, a familiar topic for most security teams. 
We are going to *start with the deprecated dashboard* so that you can evaluate the vulnerability management workflows for yourself. NOTE: For the following section, please note that the order in which the images appear or the number of components affected may vary depending on versions and other applications running in the cluster. -. Click the *Vulnerability Management (1.0)* tab, and then select *Dashboard* +.Procedure +. Click the *Vulnerability Management* tab, and then select *Dashboard (Deprecate)* Buttons along the top of the interface will list details by; @@ -109,8 +110,6 @@ The dashboard options provide several critical vulnerability breakdowns, such as - Recently detected image vulnerabilities - Most common image vulnerabilities -More important than fixing any vulnerability is establishing a process to keep container images updated and to prevent the promotion through the pipeline for images with serious, fixable vulnerabilities. RHACS displays this through the *Top Risky Deployments by CVE and CVSS Score* and takes the container's configuration and vulnerability details to show you the most *at risk* deployments in your cluster. - image::acs-vuln-dashboard-01.png[link=self, window=blank, width=100%, Riskiest Deployments] [start=2] @@ -167,37 +166,14 @@ You can move on to the next section only when the dashboard displays the image b image::acs-risk-07.png[link=self, window=blank, width=100%, Image Info] -=== RHACS Vulnerability Scanner - -RHACS' Scanner v4 is a built-in vulnerability scanner that breaks down images into layers and components - where components can be operating-system installed packages or dependencies installed by programming languages like Python, Javascript, Go, Java and more. The *Image Summary tab* provides the essential security details of the image overall, with links to the components. 
Below, you can see why the image is ranked as a critically vulnerable application: - -- In the *Details and metadata* → Image Summary panel, the information you see there tells you that this image has a severe security problem - the base image was imported several years ago (Debian 9). -- At the top of the page is the warning that CVE data is stale - that this image has a base OS version whose distribution has stopped providing security information and likely stopped publishing security fixes. ACS will still scan for language issues even if the Operating System does not have CVE data available. - - -.Procedure -. Scroll down the page to the *Image Findings* section. - -Here you find the details of the image vulnerabilities. There are 82 vulnerabilities detected with 80 of those vulnerabilities listed as fixable (at the time of the creation of this workshop.) - -image::acs-vulns-00.png[link=self, window=blank, width=100%, Fixable Vulnerabilities] - -[start=2] - -. Above the *Image Findings* section, click on the *Dockerfile* tab: - -image::acs-vulns-01.png[link=self, window=blank, width=100%, Dockerfile View] - -The Dockerfile tab view shows the layer-by-layer view, and, as you can see, the most recent layers are also several years old. Time is not kind to images and components - as vulnerabilities are discovered, RHACS will display newly discovered CVEs. The layers that are listed as *Source=OS* are not showing CVE data since the CVE feeds are stale or do not have any information. However, the Python libraries that are added to the container are showing vulnerabilities. For example, the 'mercurial' package in the four layers. - -image::acs-vulns-02.png[link=self, window=blank, width=100%] - === The Vulnerability Management (2.0) dashboard The Vulnerability Management 2.0 dashboard is part of a more extensive overhaul of vulnerability management in RHACS. 
Vulnerability management 2.0 is focused on categorizing vulnerabilities by workload so that we can perform RHEL CoreOS and node-level scanning and correlate it with platform and application vulnerabilities. This is because security teams want to understand at what software layer a vulnerability resides so they know which team to reach out to for a fix. Let's start off this section by reviewing a similar use case in the Vulnerability Management 2.0 dashboard. +More important than fixing any vulnerability is establishing a process to keep container images updated and to prevent the promotion through the pipeline for images with serious, fixable vulnerabilities. RHACS displays this through the *Top Risky Deployments by CVE and CVSS Score* and takes the container's configuration and vulnerability details to show you the most *at risk* deployments in your cluster. + image::02-vuln2-1.png[link=self, window=blank, width=100%] Another dashboard aims to show the same information as the vulnerability management 1.0 dashboard but in a more scalable and systematic approach; you can see on the top left that the vulnerabilities are categorized by CVE, Image and Deployment. diff --git a/content/modules/ROOT/pages/extra-blocking-privilege.adoc b/content/modules/ROOT/pages/extra-blocking-privilege.adoc index f892ab6..ecbd230 100644 --- a/content/modules/ROOT/pages/extra-blocking-privilege.adoc +++ b/content/modules/ROOT/pages/extra-blocking-privilege.adoc @@ -13,25 +13,21 @@ Almost all software you are running in your containers does not require 'root' l . Create a new project called *myproject* (or any other name that you prefer). -+ [source] ---- oc new-project myproject ---- . In the OpenShift console, navigate to *Home->Projects*, search for *myproject* and click on it. -+ image:images/lab1.1-myproject.png[] . Then, go to the *Workloads* tab and click on the *Container Image* tile. 
-+ image:images/lab1.1-4.7.33-workloads.png[] - NOTE: It may be easier to use *Developer* perspective of OpenShift console to navigate to a project and click "+Add" button on the left hand side menu to initiate that workflow. + NOTE: It may be easier to use *Developer* perspective of OpenShift console to navigate to a project and click "+Add" button on the left hand side menu to initiate that workflow. . Then, click on *Container Images* . Make sure that *Image name from external registry* is selected and for the Image Name, type *docker.io/httpd*. Press the magnifying glass. -+ image:images/lab1.1-4.7.33-image-external.png[] NOTE: Notice that this container image requires to be run as 'root' and listen on port 80. @@ -39,12 +35,10 @@ image:images/lab1.1-4.7.33-image-external.png[] . Leave other values as defaults and press *Create*. . Now, go back to your terminal and navigate into the project you just created by typing *oc project myproject*. Then, take a look at your pods by typing *oc get pods*. Notice that one of your pods has a CrashLoopBackOff error. -+ image:images/lab1.1-crashloopbackofferror.png[] . Let's investigate further what is causing this error. Take a look at the log of the pod that is causing this error. You can get the name of the pod from the previous *oc get pods* command. -+ [source] ---- POD=`oc get pods --selector app=httpd -o custom-columns=NAME:.metadata.name --no-headers`; oc logs $POD @@ -54,37 +48,31 @@ POD=`oc get pods --selector app=httpd -o custom-columns=NAME:.metadata.name --no . Notice that you get permission denied errors saying that you cannot bind to port 80. This is because the process was not started as root and was modified by the security context constraint to run as a specific user. -+ image:images/lab1.1-noport80.png[1500,1500] . Also we can review failing container logs via OpenShift UI console, Log tab for that pod: -+ image:images/lab1.1-failingpod-log.png[] . For a more detailed look, type 'oc describe pod ....' 
with the name of your pod. -+ [source] ---- oc describe pod $POD # Or # oc describe pod ---- -+ image:images/lab1.1-describepod-error.png[] . Notice that the output shows that the container failed after trying to start on port 80 and terminated due to a CrashLoopBackOff error. Also notice the default OpenShift Security Context Constraints (SCC) policy that is in place is 'restricted' (openshift.io/scc: restricted). . Finally, investigate your pod yaml in the OpenShift console by navigating to the *YAML* view of your pod in the OpenShift console. Scroll down to the containers definition and notice how the SCC has dropped several capabilities and added a specific runAsUser. These modifications have prevented your pod from scheduling because it was originally designed in an insecure state. -+ image:images/lab1.1-scc-modify.png[] === Lab 1.2 Work around the default container security restriction by using service accounts with SCC privileges . Now let's resolve this issue. In order to allow containers to run with elevated SCC privileges, we will create a Service Account (a special user account to run services) called 'privileged-sa': -+ [source] ---- [localhost ~]$ oc create sa privileged-sa @@ -92,7 +80,6 @@ serviceaccount/privileged-sa created ---- . Then, we will entitle that Service Account (which is not used by default by any pods) to run as any userId by running the following command to add an SCC context: -+ [source] ---- [localhost ~]$ oc adm policy add-scc-to-user anyuid -z privileged-sa @@ -100,14 +87,12 @@ clusterrole.rbac.authorization.k8s.io/system:openshift:scc:anyuid added: "privil ---- . Now we have a Service Account that can run pods/containers using any userId. But how can we "plug" it into our application to allow it to run with that privilege? 
There is a pretty straightforward OpenShift command for that as well that "injects" that non-default service account into our application deployment: -+ [source] ---- [localhost ~]$ oc set serviceaccount deployment httpd privileged-sa deployment.apps/httpd serviceaccount updated ---- . That will make our 'httpd' pod use this Service Account and enable elevated privileges. We can verify that our Deployment now is using that Service Account by running the command: -+ [source] ---- [localhost ~]$ oc describe deployment httpd @@ -159,7 +144,6 @@ Events: ---- . We now see that the Replica Set that controls pod instances has been regenerated and our HTTP server pod is running OK which we can also check in its logs: -+ [source] ---- [localhost ~]$oc logs httpd-765df85d48-pwtm5 diff --git a/content/modules/ROOT/pages/vulnmgmtextra.adoc b/content/modules/ROOT/pages/vulnmgmtextra.adoc new file mode 100644 index 0000000..5795071 --- /dev/null +++ b/content/modules/ROOT/pages/vulnmgmtextra.adoc @@ -0,0 +1,24 @@ +=== RHACS Vulnerability Scanner + +RHACS' Scanner v4 is a built-in vulnerability scanner that breaks down images into layers and components - where components can be operating-system installed packages or dependencies installed by programming languages like Python, Javascript, Go, Java and more. The *Image Summary tab* provides the essential security details of the image overall, with links to the components. Below, you can see why the image is ranked as a critically vulnerable application: + +- In the *Details and metadata* → Image Summary panel, the information you see there tells you that this image has a severe security problem - the base image was imported several years ago (Debian 9). +- At the top of the page is the warning that CVE data is stale - that this image has a base OS version whose distribution has stopped providing security information and likely stopped publishing security fixes. 
ACS will still scan for language issues even if the Operating System does not have CVE data available. + + +.Procedure +. Scroll down the page to the *Image Findings* section. + +Here you find the details of the image vulnerabilities. There are 82 vulnerabilities detected with 80 of those vulnerabilities listed as fixable (at the time of the creation of this workshop.) + +image::acs-vulns-00.png[link=self, window=blank, width=100%, Fixable Vulnerabilities] + +[start=2] + +. Above the *Image Findings* section, click on the *Dockerfile* tab: + +image::acs-vulns-01.png[link=self, window=blank, width=100%, Dockerfile View] + +The Dockerfile tab view shows the layer-by-layer view, and, as you can see, the most recent layers are also several years old. Time is not kind to images and components - as vulnerabilities are discovered, RHACS will display newly discovered CVEs. The layers that are listed as *Source=OS* are not showing CVE data since the CVE feeds are stale or do not have any information. However, the Python libraries that are added to the container are showing vulnerabilities. For example, the 'mercurial' package in the four layers. + +image::acs-vulns-02.png[link=self, window=blank, width=100%] \ No newline at end of file