Add kubernetes metadata for each node #99

Merged · 3 commits · Sep 4, 2023
Changes from 1 commit
103 changes: 98 additions & 5 deletions app/lib/kubernetes_client.py
@@ -5,6 +5,104 @@
namespace = get_namespace()


def get_pod(pod_name):
    pod = kubernetes_client.CoreV1Api().read_namespaced_pod(namespace=namespace, name=pod_name)
    return pod


def get_pod_details(pod_name):
    pod = get_pod(pod_name)
    volume_details = []

    # Collect the specs of each PVC-backed volume
    for volume in pod.spec.volumes:
        if volume.persistent_volume_claim:
            pvc_name = volume.persistent_volume_claim.claim_name
            volume_specs = get_pod_pvc_volume_size(namespace, pvc_name)
            volume_details.append({
                'pvc_name': pvc_name,
                'size': volume_specs['size'],
                'class': volume_specs['class'],
                'phase': volume_specs['phase']
            })

    affinity = pod.spec.affinity
    if affinity:
        affinity_details = get_affinity_details(affinity)
    else:
        affinity_details = {}

    # Get the details of the node where the pod is scheduled
    node_details = get_pod_node_details(pod.spec.node_name)

    potential_ready = pod.status.phase == "Running"
    # Substrate node pods run a single container, so only the first one is inspected
    container_resources = pod.spec.containers[0].resources

    pod_details = {
        'affinity': affinity_details,
        'limit_cpu': container_resources.limits.get('cpu', 'None') if container_resources.limits else 'None',
        'limit_memory': container_resources.limits.get('memory', 'None') if container_resources.limits else 'None',
        'name': pod.metadata.name,
        'namespace': pod.metadata.namespace,
        'node_details': node_details,
        'node_name': pod.spec.node_name,
        'node_selector': pod.spec.node_selector or {},
        'potential_ready': potential_ready,
        'request_cpu': container_resources.requests.get('cpu', 'None') if container_resources.requests else 'None',
        'request_memory': container_resources.requests.get('memory', 'None') if container_resources.requests else 'None',
        'restart_count': pod.status.container_statuses[0].restart_count,
        'tolerations': pod.spec.tolerations or [],
        'volume_details': volume_details
    }
    return pod_details
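
Note: only the first container's resources and restart count are reported, which matches the single-container substrate node pods this dashboard targets. If sidecar containers were ever added, a minimal sketch like the following (helper name hypothetical, not part of this PR) could report the total restart count instead:

def total_restart_count(pod):
    # container_statuses can be None while the pod is still pending
    return sum(status.restart_count for status in (pod.status.container_statuses or []))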


def get_pod_pvc_volume_size(namespace, pvc_name):
    pvc = kubernetes_client.CoreV1Api().read_namespaced_persistent_volume_claim(namespace=namespace, name=pvc_name)
    pvc_details = {
        'size': pvc.spec.resources.requests['storage'],
        'class': pvc.spec.storage_class_name,
        'phase': pvc.status.phase
    }
    return pvc_details
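
Note: spec.resources.requests['storage'] is the size the claim asked for; once the claim is bound, the provisioned size is reported in pvc.status.capacity. A variant that prefers the bound capacity (a sketch, not part of this PR; helper name hypothetical):

def get_bound_pvc_size(pvc):
    # Prefer the provisioned capacity of the bound volume; fall back to
    # the requested size while the claim is still pending
    if pvc.status.capacity and 'storage' in pvc.status.capacity:
        return pvc.status.capacity['storage']
    return pvc.spec.resources.requests['storage']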


def get_pod_node_details(node_name):
    node = kubernetes_client.CoreV1Api().read_node(name=node_name)
    node_details = {}

    # Expose a fixed set of node labels: architecture, instance type and OS
    for label in ["beta.kubernetes.io/arch", "beta.kubernetes.io/instance-type", "beta.kubernetes.io/os"]:
        if label in node.metadata.labels:
            node_details[label] = node.metadata.labels[label]
    return node_details
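
Note: the beta.kubernetes.io/* labels are deprecated upstream in favor of kubernetes.io/arch, node.kubernetes.io/instance-type and kubernetes.io/os, and newer clusters may only set the stable names. A minimal fallback sketch (mapping assumed, not part of this PR):

STABLE_LABELS = {
    "beta.kubernetes.io/arch": "kubernetes.io/arch",
    "beta.kubernetes.io/instance-type": "node.kubernetes.io/instance-type",
    "beta.kubernetes.io/os": "kubernetes.io/os",
}

def get_node_label(labels, beta_label):
    # Prefer the beta label to match current behaviour, else try its stable replacement
    return labels.get(beta_label, labels.get(STABLE_LABELS[beta_label]))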


def get_affinity_details(affinity):
    affinity_types = [("node_affinity", affinity.node_affinity),
                      ("pod_affinity", affinity.pod_affinity),
                      ("pod_anti_affinity", affinity.pod_anti_affinity)]
    affinity_details = {}

    for affinity_type, affinity_obj in affinity_types:
        if affinity_obj:
            required_during_scheduling = affinity_obj.required_during_scheduling_ignored_during_execution
            if required_during_scheduling:
                affinity_details[affinity_type] = {
                    "policy": "required_during_scheduling",
                    "details": required_during_scheduling
                }

            # If both rule kinds are set, the preferred entry below overwrites
            # the required one; only one policy is shown per affinity type
            preferred_during_scheduling = affinity_obj.preferred_during_scheduling_ignored_during_execution
            if preferred_during_scheduling:
                affinity_details[affinity_type] = {
                    "policy": "preferred_during_scheduling",
                    "details": preferred_during_scheduling
                }
    return affinity_details
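
Note: the "details" value stored here is a raw kubernetes client model (e.g. V1NodeSelector), so the template renders its Python repr. If plain dicts are ever preferred, the client's own serializer can convert them; a sketch reusing this module's existing kubernetes_client import (helper name hypothetical):

def to_plain(obj):
    # sanitize_for_serialization converts client models into JSON-compatible
    # dicts/lists, which render more readably in Jinja templates
    return kubernetes_client.ApiClient().sanitize_for_serialization(obj)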


def list_stateful_sets():
    return kubernetes_client.CustomObjectsApi().list_namespaced_custom_object(group="apps", version="v1",
                                                                              plural="statefulsets",

@@ -28,11 +126,6 @@ def list_parachain_collator_stateful_sets(para_id):
    return list(map(lambda sts: sts['metadata']['name'], collator_stateful_sets))


def get_pod(pod_name):
    pod = kubernetes_client.CoreV1Api().read_namespaced_pod(namespace=namespace, name=pod_name)
    return pod


def list_substrate_node_pods(role_label=''):
    pods = kubernetes_client.CoreV1Api().list_namespaced_pod(namespace=namespace).items
    # Keep only pods which are substrate nodes
4 changes: 2 additions & 2 deletions app/routers/views.py
@@ -4,7 +4,7 @@

from app import __version__
from app.config.network_configuration import get_node_logs_link, get_network
from app.lib.kubernetes_client import list_validator_stateful_sets, list_parachain_collator_stateful_sets
from app.lib.kubernetes_client import get_pod_details, list_validator_stateful_sets, list_parachain_collator_stateful_sets
from app.lib.network_utils import list_substrate_nodes, list_validators, get_session_queued_keys, list_parachains, \
list_parachain_collators, get_substrate_node
from app.lib.runtime_utils import get_relay_runtime, get_relay_active_configuration, get_parachain_runtime
@@ -43,7 +43,7 @@ async def get_nodes(
        request: Request,
        node_name: str = Path(description="Name of the node")
):
    return templates.TemplateResponse('node_info.html', dict(request=request, network=network, node=get_substrate_node(node_name)))
    return templates.TemplateResponse('node_info.html', dict(request=request, network=network, node=get_substrate_node(node_name), pod=get_pod_details(node_name)))


@router.get("/validators", response_class=HTMLResponse, include_in_schema=False)
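
A quick smoke test could assert the new context is actually rendered (a sketch, not part of this PR; it assumes the FastAPI app object lives at app.main and the route path is /nodes/{node_name}; the node name is hypothetical):

from fastapi.testclient import TestClient

from app.main import app  # assumed application entry point

client = TestClient(app)


def test_node_page_shows_pod_metadata():
    # Any existing substrate node pod name works here
    response = client.get("/nodes/local-rococo-alice-node-0")
    assert response.status_code == 200
    assert "Kubernetes Pod Information" in response.text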
139 changes: 137 additions & 2 deletions app/templates/node_info.html
@@ -25,7 +25,8 @@ <h3>{{ node.name }}</h3>
<button id="rotateSessionKeys">Rotate session keys</button>
{% endif %}

<table id="table" class="display compact">
<h2>Substrate Node Information</h2>
<table id="table_substrate" class="display compact">
    <thead>
    <tr>
        <th>Keys</th>
@@ -225,14 +226,148 @@ <h3>{{ node.name }}</h3>
    </tbody>
</table>

<h2>Kubernetes Pod Information</h2>
<table id="table_kubernetes" class="display compact">
    <thead>
    <tr>
        <th>Keys</th>
        <th>Values</th>
    </tr>
    </thead>
    <tbody>
    <tr>
        <td>Name</td>
        <td>{{ pod.name }}</td>
    </tr>
    <tr>
        <td>Namespace</td>
        <td>{{ pod.namespace }}</td>
    </tr>
    <tr>
        <td>Node</td>
        <td>{{ pod.node_name }}</td>
    </tr>
    {% if pod.node_selector %}
    <tr>
        <td>Node Selector</td>
        <td>{{ pod.node_selector }}</td>
    </tr>
    {% endif %}
    <tr>
        <td>Node Tolerations</td>
        <td>
            <ul>
                {% for toleration in pod.tolerations %}
                <li>{{ toleration.key }}={{ toleration.operator }}:{{ toleration.value }}</li>
                {% endfor %}
            </ul>
        </td>
    </tr>
    <tr>
        <td>Request CPU</td>
        <td>{{ pod.request_cpu }}</td>
    </tr>
    <tr>
        <td>Request Memory</td>
        <td>{{ pod.request_memory }}</td>
    </tr>
    <tr>
        <td>Limit CPU</td>
        <td>{{ pod.limit_cpu }}</td>
    </tr>
    <tr>
        <td>Limit Memory</td>
        <td>{{ pod.limit_memory }}</td>
    </tr>
    <tr>
        <td>Restart Count</td>
        <td>{{ pod.restart_count }}</td>
    </tr>
    <tr>
        <td>Volume Details</td>
        <td>
            <table>
                <thead>
                <tr>
                    <th>Name</th>
                    <th>Size</th>
                    <th>Storage Class</th>
                    <th>Storage Phase</th>
                </tr>
                </thead>
                <tbody>
                {% for volume in pod.volume_details %}
                <tr>
                    <td>{{ volume.pvc_name }}</td>
                    <td>{{ volume.size }}</td>
                    <td>{{ volume.class }}</td>
                    <td>{{ volume.phase }}</td>
                </tr>
                {% endfor %}
                </tbody>
            </table>
        </td>
    </tr>
    <tr>
        <td>Potential Ready</td>
        <td>{{ pod.potential_ready }}</td>
    </tr>
    {% if pod.affinity %}
    <tr>
        <td>Affinity</td>
        <td>
            <table>
                <thead>
                <tr>
                    <th>Type</th>
                    <th>Policy</th>
                    <th>Details</th>
                </tr>
                </thead>
                <tbody>
                {% for key, value in pod.affinity.items() %}
                <tr>
                    <td>{{ key }}</td>
                    <td>{{ value.policy }}</td>
                    <td>{{ value.details }}</td>
                </tr>
                {% endfor %}
                </tbody>
            </table>
        </td>
    </tr>
    {% endif %}
    <tr>
        <td>Node Details</td>
        <td>
            <table>
                <thead>
                <tr>
                    <th>Key</th>
                    <th>Value</th>
                </tr>
                </thead>
                <tbody>
                {% for key, value in pod.node_details.items() %}
                <tr>
                    <td>{{ key }}</td>
                    <td>{{ value }}</td>
                </tr>
                {% endfor %}
                </tbody>
            </table>
        </td>
    </tr>
    </tbody>
</table>
<script
        src="/static/libs/jquery-3.6.0.slim.min.js"
        integrity="sha256-u7e5khyithlIdTpu22PHhENmPcRdFiHRjhAuHcs05RI="
        crossorigin="anonymous"></script>
<script type="text/javascript" charset="utf8" src="/static/libs/datatables.min.js"></script>
<script>
    $(document).ready( function () {
        $('#table').DataTable({
        $('#table_substrate, #table_kubernetes').DataTable({
            // https://datatables.net/reference/option/
            "lengthMenu": [[-1], ["All"]]
        });
2 changes: 1 addition & 1 deletion local-kubernetes/charts/helmfile-rococo.yaml
@@ -67,7 +67,7 @@ releases:
    values:
      - values-shell-collator.yaml

  # Para chain Statemint ##
  ## Para chain Statemint ##
  - name: local-rococo-statemint-alice
    namespace: rococo
    chart: parity/node