22 changes: 22 additions & 0 deletions pkg/kubernetes/blockdevice.go
@@ -46,3 +46,25 @@ func GetConsumableBlockDevices(ctx context.Context, kubeconfig *rest.Config) ([]
logger.Debug("Found %d consumable BlockDevices", len(blockDevices))
return blockDevices, nil
}

// GetConsumableBlockDevicesByNode returns consumable BlockDevices for a specific node.
func GetConsumableBlockDevicesByNode(ctx context.Context, kubeconfig *rest.Config, nodeName string) ([]BlockDevice, error) {
if nodeName == "" {
return nil, fmt.Errorf("nodeName is required")
}

logger.Debug("Getting consumable BlockDevices from node %s", nodeName)

bdClient, err := storage.NewBlockDeviceClient(ctx, kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to create BlockDevice client: %w", err)
}

blockDevices, err := bdClient.ListConsumableByNode(ctx, nodeName)
if err != nil {
return nil, err
}

logger.Debug("Found %d consumable BlockDevices on node %s", len(blockDevices), nodeName)
return blockDevices, nil
}
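
A minimal usage sketch for the new per-node helper, assuming a *rest.Config is already in hand; the import path mirrors the module seen elsewhere in this PR, and the node name "worker-0" is a hypothetical example, not part of this diff.

package example

import (
"context"
"fmt"

"k8s.io/client-go/rest"

"github.com/deckhouse/storage-e2e/pkg/kubernetes"
)

// listNodeDevices shows the intended call pattern for the per-node helper.
func listNodeDevices(ctx context.Context, restCfg *rest.Config) error {
// "worker-0" is a hypothetical node name used for illustration.
devices, err := kubernetes.GetConsumableBlockDevicesByNode(ctx, restCfg, "worker-0")
if err != nil {
return fmt.Errorf("listing consumable BlockDevices: %w", err)
}
fmt.Printf("found %d consumable BlockDevices on worker-0\n", len(devices))
return nil
}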
150 changes: 150 additions & 0 deletions pkg/kubernetes/localstorageclass.go
@@ -0,0 +1,150 @@
/*
Copyright 2025 Flant JSC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubernetes

import (
"context"
"fmt"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"

"github.com/deckhouse/storage-e2e/internal/logger"
)

var LocalStorageClassGVR = schema.GroupVersionResource{
Group: "storage.deckhouse.io",
Version: "v1alpha1",
Resource: "localstorageclasses",
}

type LocalStorageClassConfig struct {
Name string
LVMVolumeGroups []string // LVMVolumeGroup resource names
LVMType string // "Thick" or "Thin"
ThinPoolName string // required when LVMType is "Thin"
ReclaimPolicy string // "Delete" or "Retain" (default: "Delete")
VolumeBindingMode string // "WaitForFirstConsumer" or "Immediate" (default: "WaitForFirstConsumer")
}

func CreateLocalStorageClass(ctx context.Context, kubeconfig *rest.Config, cfg LocalStorageClassConfig) error {
if cfg.Name == "" {
return fmt.Errorf("LocalStorageClass name is required")
}
if len(cfg.LVMVolumeGroups) == 0 {
return fmt.Errorf("at least one LVMVolumeGroup is required")
}
if cfg.LVMType == "" {
cfg.LVMType = "Thick"
}
if cfg.LVMType == "Thin" && cfg.ThinPoolName == "" {
return fmt.Errorf("ThinPoolName is required for Thin LVM type")
}
if cfg.ReclaimPolicy == "" {
cfg.ReclaimPolicy = "Delete"
}
if cfg.VolumeBindingMode == "" {
cfg.VolumeBindingMode = "WaitForFirstConsumer"
}

dynamicClient, err := NewDynamicClientWithRetry(ctx, kubeconfig)
if err != nil {
return fmt.Errorf("failed to create dynamic client: %w", err)
}

lvgRefs := make([]interface{}, len(cfg.LVMVolumeGroups))
for i, name := range cfg.LVMVolumeGroups {
ref := map[string]interface{}{
"name": name,
}
if cfg.LVMType == "Thin" {
ref["thin"] = map[string]interface{}{
"poolName": cfg.ThinPoolName,
}
}
lvgRefs[i] = ref
}

lsc := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "storage.deckhouse.io/v1alpha1",
"kind": "LocalStorageClass",
"metadata": map[string]interface{}{
"name": cfg.Name,
},
"spec": map[string]interface{}{
"lvm": map[string]interface{}{
"lvmVolumeGroups": lvgRefs,
"type": cfg.LVMType,
},
"reclaimPolicy": cfg.ReclaimPolicy,
"volumeBindingMode": cfg.VolumeBindingMode,
},
},
}

logger.Info("Creating LocalStorageClass %s (type=%s, lvmVolumeGroups=%v)", cfg.Name, cfg.LVMType, cfg.LVMVolumeGroups)
_, err = dynamicClient.Resource(LocalStorageClassGVR).Create(ctx, lsc, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
logger.Info("LocalStorageClass %s already exists, skipping create", cfg.Name)
return nil
}
return fmt.Errorf("failed to create LocalStorageClass %s: %w", cfg.Name, err)
}
logger.Success("LocalStorageClass %s created", cfg.Name)
return nil
}

// WaitForLocalStorageClassCreated waits for the LocalStorageClass CR status to indicate
// that the controller has created the corresponding StorageClass.
func WaitForLocalStorageClassCreated(ctx context.Context, kubeconfig *rest.Config, name string, timeout time.Duration) error {
logger.Debug("Waiting for LocalStorageClass %s to be Created (timeout: %v)", name, timeout)

dynamicClient, err := NewDynamicClientWithRetry(ctx, kubeconfig)
if err != nil {
return fmt.Errorf("failed to create dynamic client: %w", err)
}

ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()

for {
obj, err := dynamicClient.Resource(LocalStorageClassGVR).Get(ctx, name, metav1.GetOptions{})
if err == nil {
phase, _, _ := unstructured.NestedString(obj.Object, "status", "phase")
if phase == "Created" {
logger.Success("LocalStorageClass %s is Created", name)
return nil
}
logger.Debug("LocalStorageClass %s phase: %s, waiting...", name, phase)
} else {
logger.Debug("Failed to get LocalStorageClass %s: %v (will retry)", name, err)
}

select {
case <-ctx.Done():
return fmt.Errorf("timeout waiting for LocalStorageClass %s to be Created: %w", name, ctx.Err())
case <-ticker.C:
}
}
}
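
For reviewers, a sketch of how the two new functions compose: create the custom resource, then poll until the controller reports phase "Created". The resource names and the two-minute timeout are illustrative assumptions, not values from this PR.

package example

import (
"context"
"time"

"k8s.io/client-go/rest"

"github.com/deckhouse/storage-e2e/pkg/kubernetes"
)

// provisionThinStorageClass creates a Thin LocalStorageClass and waits for it.
func provisionThinStorageClass(ctx context.Context, restCfg *rest.Config) error {
cfg := kubernetes.LocalStorageClassConfig{
Name: "e2e-thin-sc", // hypothetical name
LVMVolumeGroups: []string{"vg-worker-0"}, // hypothetical LVMVolumeGroup
LVMType: "Thin",
ThinPoolName: "thin-pool-0", // required because LVMType is "Thin"
}
// ReclaimPolicy and VolumeBindingMode default to "Delete" and "WaitForFirstConsumer".
if err := kubernetes.CreateLocalStorageClass(ctx, restCfg, cfg); err != nil {
return err
}
return kubernetes.WaitForLocalStorageClassCreated(ctx, restCfg, cfg.Name, 2*time.Minute)
}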
129 changes: 129 additions & 0 deletions pkg/kubernetes/nodes.go
@@ -21,6 +21,8 @@ import (
"fmt"
"time"

corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"

@@ -29,6 +31,133 @@ import (

const nodeLabelPollInterval = 10 * time.Second

// GetNodes returns all nodes in the cluster.
func GetNodes(ctx context.Context, kubeconfig *rest.Config) ([]corev1.Node, error) {
clientset, err := NewClientsetWithRetry(ctx, kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to create clientset: %w", err)
}

nodeList, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to list nodes: %w", err)
}

logger.Debug("Found %d nodes", len(nodeList.Items))
return nodeList.Items, nil
}

// GetWorkerNodes returns all worker nodes in the cluster.
// A worker node is any node that has neither the "node-role.kubernetes.io/control-plane"
// nor the legacy "node-role.kubernetes.io/master" label.
func GetWorkerNodes(ctx context.Context, kubeconfig *rest.Config) ([]corev1.Node, error) {
allNodes, err := GetNodes(ctx, kubeconfig)
if err != nil {
return nil, err
}

var workers []corev1.Node
for _, node := range allNodes {
if _, isControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]; isControlPlane {
continue
}
if _, isMaster := node.Labels["node-role.kubernetes.io/master"]; isMaster {
continue
}
workers = append(workers, node)
}

logger.Debug("Found %d worker nodes", len(workers))
return workers, nil
}

// LabelNodes adds a label to each of the specified nodes.
// If a node already has the label with the desired value, it is skipped.
// Uses retry with re-fetch to handle optimistic concurrency conflicts.
func LabelNodes(ctx context.Context, kubeconfig *rest.Config, nodeNames []string, labelKey, labelValue string) error {
if len(nodeNames) == 0 {
return nil
}

clientset, err := NewClientsetWithRetry(ctx, kubeconfig)
if err != nil {
return fmt.Errorf("failed to create clientset: %w", err)
}

const maxRetries = 5

for _, name := range nodeNames {
var lastErr error
for attempt := 0; attempt < maxRetries; attempt++ {
node, err := clientset.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get node %s: %w", name, err)
}

if node.Labels != nil {
if v, ok := node.Labels[labelKey]; ok && v == labelValue {
logger.Debug("Node %s already has label %s=%s", name, labelKey, labelValue)
lastErr = nil
break
}
}

if node.Labels == nil {
node.Labels = make(map[string]string)
}
node.Labels[labelKey] = labelValue

_, lastErr = clientset.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
if lastErr == nil {
logger.Info("Labeled node %s with %s=%s", name, labelKey, labelValue)
break
}

if apierrors.IsConflict(lastErr) {
logger.Debug("Conflict labeling node %s (attempt %d/%d), retrying...", name, attempt+1, maxRetries)
continue
}
return fmt.Errorf("failed to label node %s: %w", name, lastErr)
}
if lastErr != nil {
return fmt.Errorf("failed to label node %s after %d attempts: %w", name, maxRetries, lastErr)
}
}

return nil
}

// GetNodeTaints returns the taints of the named node.
func GetNodeTaints(ctx context.Context, kubeconfig *rest.Config, nodeName string) ([]corev1.Taint, error) {
clientset, err := NewClientsetWithRetry(ctx, kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to create clientset: %w", err)
}

node, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get node %s: %w", nodeName, err)
}

return node.Spec.Taints, nil
}

// IsNodeCordoned reports whether a node carries NoSchedule or NoExecute taints
// (for example, the node.kubernetes.io/unschedulable taint added by cordoning)
// that could prevent pods without matching tolerations from scheduling.
func IsNodeCordoned(ctx context.Context, kubeconfig *rest.Config, nodeName string) (bool, error) {
taints, err := GetNodeTaints(ctx, kubeconfig, nodeName)
if err != nil {
return false, err
}

for _, taint := range taints {
if taint.Effect == corev1.TaintEffectNoSchedule || taint.Effect == corev1.TaintEffectNoExecute {
logger.Debug("Node %s has taint %s=%s:%s", nodeName, taint.Key, taint.Value, taint.Effect)
return true, nil
}
}
return false, nil
}

// WaitForNodesLabeled waits for all specified nodes to have the given label with the expected value.
// It polls each node in parallel every 10 seconds until all nodes have the label or the context times out.
// Parameters:
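
Taken together, the nodes.go additions support a select-and-label workflow: enumerate workers, skip nodes carrying scheduling taints, and label the remainder. A sketch under assumptions; the label key and value below are hypothetical, not from this PR.

package example

import (
"context"

"k8s.io/client-go/rest"

"github.com/deckhouse/storage-e2e/pkg/kubernetes"
)

// labelSchedulableWorkers labels every worker node that is free of
// NoSchedule/NoExecute taints. LabelNodes handles update conflicts internally.
func labelSchedulableWorkers(ctx context.Context, restCfg *rest.Config) error {
workers, err := kubernetes.GetWorkerNodes(ctx, restCfg)
if err != nil {
return err
}
var targets []string
for _, node := range workers {
cordoned, err := kubernetes.IsNodeCordoned(ctx, restCfg, node.Name)
if err != nil {
return err
}
if cordoned {
continue // skip tainted nodes
}
targets = append(targets, node.Name)
}
// The label key/value here are hypothetical examples.
return kubernetes.LabelNodes(ctx, restCfg, targets, "e2e.storage.deckhouse.io/candidate", "true")
}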