mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-07 11:13:48 +00:00
Merge pull request #102886 from gnufied/add-local-expansion
Add support for expanding local volumes
This commit is contained in:
commit
1f8b1b84fb
@ -57,6 +57,7 @@ type localVolumePlugin struct {
|
|||||||
var _ volume.VolumePlugin = &localVolumePlugin{}
|
var _ volume.VolumePlugin = &localVolumePlugin{}
|
||||||
var _ volume.PersistentVolumePlugin = &localVolumePlugin{}
|
var _ volume.PersistentVolumePlugin = &localVolumePlugin{}
|
||||||
var _ volume.BlockVolumePlugin = &localVolumePlugin{}
|
var _ volume.BlockVolumePlugin = &localVolumePlugin{}
|
||||||
|
var _ volume.NodeExpandableVolumePlugin = &localVolumePlugin{}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
localVolumePluginName = "kubernetes.io/local-volume"
|
localVolumePluginName = "kubernetes.io/local-volume"
|
||||||
@ -376,6 +377,48 @@ func (dm *deviceMounter) MountDevice(spec *volume.Spec, devicePath string, devic
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RequiresFSResize returns true: after a local PV's size is increased, the
// backing filesystem still has to be resized on the node, which this plugin
// performs in NodeExpand.
func (plugin *localVolumePlugin) RequiresFSResize() bool {
	return true
}
|
||||||
|
|
||||||
|
func (plugin *localVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
|
||||||
|
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("error checking VolumeMode: %v", err)
|
||||||
|
}
|
||||||
|
if !fsVolume {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
localDevicePath := resizeOptions.VolumeSpec.PersistentVolume.Spec.Local.Path
|
||||||
|
|
||||||
|
kvh, ok := plugin.host.(volume.KubeletVolumeHost)
|
||||||
|
if !ok {
|
||||||
|
return false, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
|
||||||
|
}
|
||||||
|
|
||||||
|
fileType, err := kvh.GetHostUtil().GetFileType(localDevicePath)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch fileType {
|
||||||
|
case hostutil.FileTypeBlockDev:
|
||||||
|
_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), localDevicePath, resizeOptions.DeviceMountPath)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
case hostutil.FileTypeDirectory:
|
||||||
|
// if the given local volume path is of already filesystem directory, return directly because
|
||||||
|
// we do not want to prevent mount operation from succeeding.
|
||||||
|
klog.InfoS("expansion of directory based local volumes is NO-OP", "local-volume-path", localDevicePath)
|
||||||
|
return true, nil
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("only directory and block device are supported")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func getVolumeSourceFSType(spec *volume.Spec) (string, error) {
|
func getVolumeSourceFSType(spec *volume.Spec) (string, error) {
|
||||||
if spec.PersistentVolume != nil &&
|
if spec.PersistentVolume != nil &&
|
||||||
spec.PersistentVolume.Spec.Local != nil {
|
spec.PersistentVolume.Spec.Local != nil {
|
||||||
|
@ -83,6 +83,37 @@ func getBlockPlugin(t *testing.T) (string, volume.BlockVolumePlugin) {
|
|||||||
return tmpDir, plug
|
return tmpDir, plug
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getNodeExpandablePlugin(t *testing.T, isBlockDevice bool) (string, volume.NodeExpandableVolumePlugin) {
|
||||||
|
tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't make a temp dir: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
plugMgr := volume.VolumePluginMgr{}
|
||||||
|
var pathToFSType map[string]hostutil.FileType
|
||||||
|
if isBlockDevice {
|
||||||
|
pathToFSType = map[string]hostutil.FileType{
|
||||||
|
tmpDir: hostutil.FileTypeBlockDev,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pathToFSType = map[string]hostutil.FileType{
|
||||||
|
tmpDir: hostutil.FileTypeDirectory,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHostWithMounterFSType(t, tmpDir, nil, nil, pathToFSType))
|
||||||
|
|
||||||
|
plug, err := plugMgr.FindNodeExpandablePluginByName(localVolumePluginName)
|
||||||
|
if err != nil {
|
||||||
|
os.RemoveAll(tmpDir)
|
||||||
|
t.Fatalf("Can't find the plugin by name")
|
||||||
|
}
|
||||||
|
if plug.GetPluginName() != localVolumePluginName {
|
||||||
|
t.Errorf("Wrong name: %s", plug.GetPluginName())
|
||||||
|
}
|
||||||
|
return tmpDir, plug
|
||||||
|
}
|
||||||
|
|
||||||
func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
|
func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
|
||||||
tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
|
tmpDir, err := utiltesting.MkTmpdir("localVolumeTest")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -148,6 +179,9 @@ func getTestVolume(readOnly bool, path string, isBlock bool, mountOptions []stri
|
|||||||
if isBlock {
|
if isBlock {
|
||||||
blockMode := v1.PersistentVolumeBlock
|
blockMode := v1.PersistentVolumeBlock
|
||||||
pv.Spec.VolumeMode = &blockMode
|
pv.Spec.VolumeMode = &blockMode
|
||||||
|
} else {
|
||||||
|
fsMode := v1.PersistentVolumeFilesystem
|
||||||
|
pv.Spec.VolumeMode = &fsMode
|
||||||
}
|
}
|
||||||
return volume.NewSpecFromPersistentVolume(pv, readOnly)
|
return volume.NewSpecFromPersistentVolume(pv, readOnly)
|
||||||
}
|
}
|
||||||
@ -289,6 +323,28 @@ func TestFSGlobalPathAndMountDevice(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNodeExpand(t *testing.T) {
|
||||||
|
// FS global path testing
|
||||||
|
tmpFSDir, plug := getNodeExpandablePlugin(t, false)
|
||||||
|
defer os.RemoveAll(tmpFSDir)
|
||||||
|
|
||||||
|
pvSpec := getTestVolume(false, tmpFSDir, false, nil)
|
||||||
|
|
||||||
|
resizeOptions := volume.NodeResizeOptions{
|
||||||
|
VolumeSpec: pvSpec,
|
||||||
|
DevicePath: tmpFSDir,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Actually, we will do no volume expansion if volume is of type dir
|
||||||
|
resizeDone, err := plug.NodeExpand(resizeOptions)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if !resizeDone {
|
||||||
|
t.Errorf("expected resize to be done")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestMountUnmount(t *testing.T) {
|
func TestMountUnmount(t *testing.T) {
|
||||||
tmpDir, plug := getPlugin(t)
|
tmpDir, plug := getPlugin(t)
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
173
test/e2e/storage/local_volume_resize.go
Normal file
173
test/e2e/storage/local_volume_resize.go
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2021 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v1 "k8s.io/api/core/v1"
|
||||||
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo"
|
||||||
|
"github.com/onsi/gomega"
|
||||||
|
"k8s.io/apimachinery/pkg/util/rand"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||||
|
"k8s.io/kubernetes/test/e2e/storage/testsuites"
|
||||||
|
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// E2E: online expansion of a loopback-file-backed local block volume.
// BeforeEach provisions an expandable StorageClass plus one local PV/PVC on a
// random node; the test then grows the PVC, grows the backing loop device by
// hand, bumps the PV capacity to match, and waits for the kubelet-side
// filesystem resize to complete while a pod is still using the volume.
var _ = utils.SIGDescribe("PersistentVolumes-expansion ", func() {
	f := framework.NewDefaultFramework("persistent-local-volumes-expansion")
	ginkgo.Context("loopback local block volume", func() {
		var (
			config *localTestConfig
			scName string
		)

		testVolType := BlockFsWithFormatLocalVolumeType
		var testVol *localTestVolume
		testMode := immediateMode
		ginkgo.BeforeEach(func() {
			nodes, err := e2enode.GetBoundedReadySchedulableNodes(f.ClientSet, maxNodes)
			framework.ExpectNoError(err)

			scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
			// Choose a random node
			randomNode := &nodes.Items[rand.Intn(len(nodes.Items))]

			hostExec := utils.NewHostExec(f)
			ltrMgr := utils.NewLocalResourceManager("local-volume-test", hostExec, hostBase)
			config = &localTestConfig{
				ns:           f.Namespace.Name,
				client:       f.ClientSet,
				timeouts:     f.Timeouts,
				nodes:        nodes.Items,
				randomNode:   randomNode,
				scName:       scName,
				discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
				hostExec:     hostExec,
				ltrMgr:       ltrMgr,
			}

			// Expandable SC first, then one PV/PVC pair bound on the chosen node.
			setupExpandableLocalStorageClass(config, &testMode)
			testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, testMode)
			testVol = testVols[0]
		})
		ginkgo.AfterEach(func() {
			cleanupLocalVolumes(config, []*localTestVolume{testVol})
			cleanupStorageClass(config)
		})

		ginkgo.It("should support online expansion on node", func() {
			var (
				pod1    *v1.Pod
				pod1Err error
			)
			ginkgo.By("Creating pod1")
			pod1, pod1Err = createLocalPod(config, testVol, nil)
			framework.ExpectNoError(pod1Err)
			verifyLocalPod(config, testVol, pod1, config.randomNode.Name)

			// We expand the PVC while l.pod is using it for online expansion.
			ginkgo.By("Expanding current pvc")
			currentPvcSize := testVol.pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("10Mi"))
			framework.Logf("currentPvcSize %s, newSize %s", currentPvcSize.String(), newSize.String())
			newPVC, err := testsuites.ExpandPVCSize(testVol.pvc, newSize, f.ClientSet)
			framework.ExpectNoError(err, "While updating pvc for more size")
			testVol.pvc = newPVC
			gomega.Expect(testVol.pvc).NotTo(gomega.BeNil())

			pvcSize := testVol.pvc.Spec.Resources.Requests[v1.ResourceStorage]
			if pvcSize.Cmp(newSize) != 0 {
				framework.Failf("error updating pvc size %q", testVol.pvc.Name)
			}

			// Now update the underlying volume manually
			err = config.ltrMgr.ExpandBlockDevice(testVol.ltr, 10 /*number of 1M blocks to add*/)
			framework.ExpectNoError(err, "while expanding loopback device")

			// now update PV to matching size
			pv, err := UpdatePVSize(testVol.pv, newSize, f.ClientSet)
			framework.ExpectNoError(err, "while updating pv to more size")
			gomega.Expect(pv).NotTo(gomega.BeNil())
			testVol.pv = pv

			ginkgo.By("Waiting for file system resize to finish")
			testVol.pvc, err = testsuites.WaitForFSResize(testVol.pvc, f.ClientSet)
			framework.ExpectNoError(err, "while waiting for fs resize to finish")

			// No resize-related conditions should remain once expansion is done.
			pvcConditions := testVol.pvc.Status.Conditions
			framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
		})

	})

})
|
||||||
|
|
||||||
|
func UpdatePVSize(pv *v1.PersistentVolume, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolume, error) {
|
||||||
|
pvName := pv.Name
|
||||||
|
pvToUpdate := pv.DeepCopy()
|
||||||
|
|
||||||
|
var lastError error
|
||||||
|
waitErr := wait.PollImmediate(5*time.Second, csiResizeWaitPeriod, func() (bool, error) {
|
||||||
|
var err error
|
||||||
|
pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("error fetching pv %s: %v", pvName, err)
|
||||||
|
}
|
||||||
|
pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
|
||||||
|
pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(context.TODO(), pvToUpdate, metav1.UpdateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
framework.Logf("error updating PV %s: %v", pvName, err)
|
||||||
|
lastError = err
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
})
|
||||||
|
if waitErr == wait.ErrWaitTimeout {
|
||||||
|
return nil, fmt.Errorf("timed out attempting to update PV size. last update error: %v", lastError)
|
||||||
|
}
|
||||||
|
if waitErr != nil {
|
||||||
|
return nil, fmt.Errorf("failed to expand PV size: %v", waitErr)
|
||||||
|
}
|
||||||
|
return pvToUpdate, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupExpandableLocalStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMode) {
|
||||||
|
enableExpansion := true
|
||||||
|
sc := &storagev1.StorageClass{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: config.scName,
|
||||||
|
},
|
||||||
|
Provisioner: "kubernetes.io/no-provisioner",
|
||||||
|
VolumeBindingMode: mode,
|
||||||
|
AllowVolumeExpansion: &enableExpansion,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.client.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
|
||||||
|
framework.ExpectNoError(err)
|
||||||
|
}
|
@ -70,6 +70,7 @@ type LocalTestResource struct {
|
|||||||
// LocalTestResourceManager represents interface to create/destroy local test resources on node
|
// LocalTestResourceManager represents interface to create/destroy local test resources on node
|
||||||
type LocalTestResourceManager interface {
|
type LocalTestResourceManager interface {
|
||||||
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
|
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
|
||||||
|
ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error
|
||||||
Remove(ltr *LocalTestResource)
|
Remove(ltr *LocalTestResource)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -289,6 +290,21 @@ func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
|
|||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error {
|
||||||
|
ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd)
|
||||||
|
loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node)
|
||||||
|
losetupCmd := fmt.Sprintf("losetup -c %s", loopDev)
|
||||||
|
return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error {
|
||||||
|
switch ltr.VolumeType {
|
||||||
|
case LocalVolumeBlockFS:
|
||||||
|
return l.expandLocalVolumeBlockFS(ltr, mbtoAdd)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType)
|
||||||
|
}
|
||||||
|
|
||||||
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
|
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
|
||||||
var ltr *LocalTestResource
|
var ltr *LocalTestResource
|
||||||
switch volumeType {
|
switch volumeType {
|
||||||
|
Loading…
Reference in New Issue
Block a user