Commit 3cb5e37

Merge pull request #726 from jterry75/simplify_hcsoci_mountlayers_cleanup
Simplify hcsoci.MountContainerLayers cleanup logic
2 parents c558106 + 9a999e5 commit 3cb5e37

4 files changed: +109, -113 lines changed


internal/hcsoci/layers.go

Lines changed: 69 additions & 81 deletions
@@ -20,12 +20,6 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-type lcowLayerEntry struct {
-	hostPath string
-	uvmPath  string
-	scsi     bool
-}
-
 const scratchPath = "scratch"
 
 // mountContainerLayers is a helper for clients to hide all the complexity of layer mounting
@@ -37,7 +31,7 @@ const scratchPath = "scratch"
 // inside the utility VM which is a GUID mapping of the scratch folder. Each
 // of the layers are the VSMB locations where the read-only layers are mounted.
 //
-func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot string, uvm *uvmpkg.UtilityVM) (interface{}, error) {
+func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot string, uvm *uvmpkg.UtilityVM) (_ interface{}, err error) {
 	log.G(ctx).WithField("layerFolders", layerFolders).Debug("hcsshim::mountContainerLayers")
 
 	if uvm == nil {
@@ -50,34 +44,37 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 		if err := wclayer.ActivateLayer(path); err != nil {
 			return nil, err
 		}
+		defer func() {
+			if err != nil {
+				if err := wclayer.DeactivateLayer(path); err != nil {
+					log.G(ctx).WithFields(logrus.Fields{
+						logrus.ErrorKey: err,
+						"path":          path,
+					}).Warn("failed to DeactivateLayer on cleanup")
+				}
+			}
+		}()
 
 		log.G(ctx).WithFields(logrus.Fields{
 			"path": path,
 			"rest": rest,
 		}).Debug("hcsshim::mountContainerLayers PrepareLayer")
 		if err := wclayer.PrepareLayer(path, rest); err != nil {
-			if err2 := wclayer.DeactivateLayer(path); err2 != nil {
-				log.G(ctx).WithFields(logrus.Fields{
-					logrus.ErrorKey: err,
-					"path":          path,
-				}).Warn("Failed to Deactivate")
-			}
 			return nil, err
 		}
+		defer func() {
+			if err != nil {
+				if err := wclayer.UnprepareLayer(path); err != nil {
+					log.G(ctx).WithFields(logrus.Fields{
+						logrus.ErrorKey: err,
+						"path":          path,
+					}).Warn("failed to UnprepareLayer on cleanup")
+				}
+			}
+		}()
 
 		mountPath, err := wclayer.GetLayerMountPath(path)
 		if err != nil {
-			if err := wclayer.UnprepareLayer(path); err != nil {
-				log.G(ctx).WithFields(logrus.Fields{
-					logrus.ErrorKey: err,
-					"path":          path,
-				}).Warn("Failed to Unprepare")
-			}
-			if err2 := wclayer.DeactivateLayer(path); err2 != nil {
-				log.G(ctx).WithFields(logrus.Fields{
-					logrus.ErrorKey: err,
-					"path":          path,
-				}).Warn("Failed to Deactivate")
-			}
 			return nil, err
 		}
 		return mountPath, nil
@@ -86,17 +83,34 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 	// V2 UVM
 	log.G(ctx).WithField("os", uvm.OS()).Debug("hcsshim::mountContainerLayers V2 UVM")
 
-	// Add each read-only layers. For Windows, this is a VSMB share with the ResourceUri ending in
-	// a GUID based on the folder path. For Linux, this is a VPMEM device, except where is over the
-	// max size supported, where we put it on SCSI instead.
-	//
-	// Each layer is ref-counted so that multiple containers in the same utility VM can share them.
-	var wcowLayersAdded []string
-	var lcowlayersAdded []lcowLayerEntry
-	attachedSCSIHostPath := ""
+	var (
+		layersAdded       []string
+		lcowUvmLayerPaths []string
+	)
+	defer func() {
+		if err != nil {
+			if uvm.OS() == "windows" {
+				for _, l := range layersAdded {
+					if err := uvm.RemoveVSMB(ctx, l); err != nil {
+						log.G(ctx).WithError(err).Warn("failed to remove wcow layer on cleanup")
+					}
+				}
+			} else {
+				for _, l := range layersAdded {
+					// Assume it was added to vPMEM and fall back to SCSI
+					e := uvm.RemoveVPMEM(ctx, l)
+					if e == uvmpkg.ErrNotAttached {
+						e = uvm.RemoveSCSI(ctx, l)
+					}
+					if e != nil {
+						log.G(ctx).WithError(e).Warn("failed to remove lcow layer on cleanup")
+					}
+				}
+			}
+		}
+	}()
 
 	for _, layerPath := range layerFolders[:len(layerFolders)-1] {
-		var err error
 		if uvm.OS() == "windows" {
 			options := &hcsschema.VirtualSmbShareOptions{
 				ReadOnly: true,
@@ -107,32 +121,26 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 			}
 			err = uvm.AddVSMB(ctx, layerPath, "", options)
 			if err == nil {
-				wcowLayersAdded = append(wcowLayersAdded, layerPath)
+				layersAdded = append(layersAdded, layerPath)
 			}
 		} else {
-			entry := lcowLayerEntry{
-				hostPath: filepath.Join(layerPath, "layer.vhd"),
-			}
+			var (
+				layerPath = filepath.Join(layerPath, "layer.vhd")
+				uvmPath   string
+			)
+
 			// We first try vPMEM and if it is full or the file is too large we
 			// fall back to SCSI.
-			_, entry.uvmPath, err = uvm.AddVPMEM(ctx, entry.hostPath, true)
+			uvmPath, err = uvm.AddVPMEM(ctx, layerPath)
 			if err == uvmpkg.ErrNoAvailableLocation || err == uvmpkg.ErrMaxVPMEMLayerSize {
-				var (
-					controller int
-					lun        int32
-				)
-				controller, lun, err = uvm.AddSCSILayer(ctx, entry.hostPath)
-				if err == nil {
-					entry.uvmPath = fmt.Sprintf("/tmp/S%d/%d", controller, lun)
-					entry.scsi = true
-				}
+				uvmPath, err = uvm.AddSCSILayer(ctx, layerPath)
 			}
 			if err == nil {
-				lcowlayersAdded = append(lcowlayersAdded, entry)
+				layersAdded = append(layersAdded, layerPath)
+				lcowUvmLayerPaths = append(lcowUvmLayerPaths, uvmPath)
 			}
 		}
 		if err != nil {
-			cleanupOnMountFailure(ctx, uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
 			return nil, err
 		}
 	}
@@ -143,19 +151,23 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 
 	// BUGBUG Rename guestRoot better.
 	containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
-	_, _, err := uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false)
+	_, _, err = uvm.AddSCSI(ctx, hostPath, containerScratchPathInUVM, false)
 	if err != nil {
-		cleanupOnMountFailure(ctx, uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
 		return nil, err
 	}
-	attachedSCSIHostPath = hostPath
+	defer func() {
+		if err != nil {
+			if err := uvm.RemoveSCSI(ctx, hostPath); err != nil {
+				log.G(ctx).WithError(err).Warn("failed to remove scratch on cleanup")
+			}
+		}
+	}()
 
 	if uvm.OS() == "windows" {
 		// Load the filter at the C:\s<ID> location calculated above. We pass into this request each of the
 		// read-only layer folders.
-		layers, err := computeV2Layers(ctx, uvm, wcowLayersAdded)
+		layers, err := computeV2Layers(ctx, uvm, layersAdded)
 		if err != nil {
-			cleanupOnMountFailure(ctx, uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
 			return nil, err
 		}
 		guestRequest := guestrequest.CombinedLayers{
@@ -170,7 +182,6 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 			},
 		}
 		if err := uvm.Modify(ctx, combinedLayersModification); err != nil {
-			cleanupOnMountFailure(ctx, uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
 			return nil, err
 		}
 		log.G(ctx).Debug("hcsshim::mountContainerLayers Succeeded")
@@ -195,8 +206,8 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 	// /dev/sd(b...) are scratch spaces for each container
 
 	layers := []hcsschema.Layer{}
-	for _, l := range lcowlayersAdded {
-		layers = append(layers, hcsschema.Layer{Path: l.uvmPath})
+	for _, l := range lcowUvmLayerPaths {
+		layers = append(layers, hcsschema.Layer{Path: l})
 	}
 	guestRequest := guestrequest.CombinedLayers{
 		ContainerRootPath: path.Join(guestRoot, rootfsPath),
@@ -211,7 +222,6 @@ func MountContainerLayers(ctx context.Context, layerFolders []string, guestRoot
 		},
 	}
 	if err := uvm.Modify(ctx, combinedLayersModification); err != nil {
-		cleanupOnMountFailure(ctx, uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
 		return nil, err
 	}
 	log.G(ctx).Debug("hcsshim::mountContainerLayers Succeeded")
@@ -341,28 +351,6 @@ func UnmountContainerLayers(ctx context.Context, layerFolders []string, guestRoo
 	return retError
 }
 
-func cleanupOnMountFailure(ctx context.Context, uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) {
-	for _, wl := range wcowLayers {
-		if err := uvm.RemoveVSMB(ctx, wl); err != nil {
-			log.G(ctx).WithError(err).Warn("Possibly leaked vsmbshare on error removal path")
-		}
-	}
-	for _, ll := range lcowLayers {
-		if ll.scsi {
-			if err := uvm.RemoveSCSI(ctx, ll.hostPath); err != nil {
-				log.G(ctx).WithError(err).Warn("Possibly leaked SCSI on error removal path")
-			}
-		} else if err := uvm.RemoveVPMEM(ctx, ll.hostPath); err != nil {
-			log.G(ctx).WithError(err).Warn("Possibly leaked vpmemdevice on error removal path")
-		}
-	}
-	if scratchHostPath != "" {
-		if err := uvm.RemoveSCSI(ctx, scratchHostPath); err != nil {
-			log.G(ctx).WithError(err).Warn("Possibly leaked SCSI disk on error removal path")
-		}
-	}
-}
-
 func computeV2Layers(ctx context.Context, vm *uvm.UtilityVM, paths []string) (layers []hcsschema.Layer, err error) {
 	for _, path := range paths {
 		uvmPath, err := vm.GetVSMBUvmPath(ctx, path)
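The recurring pattern in this file is a deferred cleanup closure guarded by the named error return: each resource is released only if MountContainerLayers ultimately fails, which is what lets the centralized cleanupOnMountFailure helper go away. A minimal, standalone Go sketch of that pattern, with hypothetical acquire/release helpers rather than the hcsshim API:

package main

import "fmt"

// acquire and release are hypothetical stand-ins for paired operations such as
// ActivateLayer/DeactivateLayer or AddVSMB/RemoveVSMB.
func acquire(name string) error { fmt.Println("acquire", name); return nil }
func release(name string) error { fmt.Println("release", name); return nil }

// mount acquires resources in order. Because the error return is named, each
// deferred closure can inspect it after the body finishes and undo its own
// step only when mount is returning an error.
func mount(names []string) (err error) {
	var acquired []string
	defer func() {
		if err != nil {
			// Failure path: walk back everything acquired so far.
			for _, n := range acquired {
				if rerr := release(n); rerr != nil {
					fmt.Println("failed to release on cleanup:", rerr)
				}
			}
		}
	}()

	for _, n := range names {
		if err = acquire(n); err != nil {
			return err
		}
		acquired = append(acquired, n)
	}
	return nil
}

func main() {
	_ = mount([]string{"layer1", "layer2", "scratch"})
}

Deferred closures run in LIFO order, so on the failure path later acquisitions are undone before earlier ones, and the success path pays no cleanup cost at all.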

internal/uvm/scsi.go

Lines changed: 16 additions & 7 deletions
@@ -12,6 +12,10 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+const (
+	lcowSCSILayerFmt = "/tmp/S%d/%d"
+)
+
 var (
 	ErrNoAvailableLocation = fmt.Errorf("no available location")
 	ErrNotAttached         = fmt.Errorf("not attached")
@@ -112,15 +116,20 @@ func (uvm *UtilityVM) AddSCSIPhysicalDisk(ctx context.Context, hostPath, uvmPath
 	return uvm.addSCSIActual(ctx, hostPath, uvmPath, "PassThru", false, readOnly)
 }
 
-// AddSCSILayer adds a read-only layer disk to a utility VM at the next available
-// location. This function is used by LCOW as an alternate to PMEM for large layers.
-// The UVMPath will always be /tmp/S<controller>/<lun>.
-func (uvm *UtilityVM) AddSCSILayer(ctx context.Context, hostPath string) (int, int32, error) {
+// AddSCSILayer adds a read-only layer disk to a utility VM at the next
+// available location and returns the path in the UVM where the layer was
+// mounted. This function is used by LCOW as an alternate to PMEM for large
+// layers.
+func (uvm *UtilityVM) AddSCSILayer(ctx context.Context, hostPath string) (string, error) {
 	if uvm.operatingSystem == "windows" {
-		return -1, -1, ErrSCSILayerWCOWUnsupported
+		return "", ErrSCSILayerWCOWUnsupported
 	}
 
-	return uvm.addSCSIActual(ctx, hostPath, "", "VirtualDisk", true, true)
+	controller, lun, err := uvm.addSCSIActual(ctx, hostPath, "", "VirtualDisk", true, true)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf(lcowSCSILayerFmt, controller, lun), nil
 }
 
 // addSCSIActual is the implementation behind the external functions AddSCSI and
@@ -189,7 +198,7 @@ func (uvm *UtilityVM) addSCSIActual(ctx context.Context, hostPath, uvmPath, atta
 
 	// Auto-generate the UVM path for LCOW layers
	if isLayer {
-		uvmPath = fmt.Sprintf("/tmp/S%d/%d", controller, lun)
+		uvmPath = fmt.Sprintf(lcowSCSILayerFmt, controller, lun)
	}
 
 	// See comment higher up. Now safe to release the lock.
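The signature change trades the (controller, LUN) pair for the guest mount path, so callers such as MountContainerLayers no longer format /tmp/S<controller>/<lun> themselves. A rough, self-contained sketch of that kind of wrapper, where addDisk is a made-up stand-in for addSCSIActual rather than the real helper:

package main

import "fmt"

// addDisk is a hypothetical low-level helper; it reports which controller and
// LUN a disk was attached to.
func addDisk(hostPath string) (controller int, lun int32, err error) {
	return 0, 3, nil
}

// addLayer mirrors the new AddSCSILayer shape: it hides controller/LUN details
// and returns only the guest mount path, "/tmp/S<controller>/<lun>".
func addLayer(hostPath string) (string, error) {
	controller, lun, err := addDisk(hostPath)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("/tmp/S%d/%d", controller, lun), nil
}

func main() {
	p, err := addLayer("layer.vhd")
	fmt.Println(p, err) // /tmp/S0/3 <nil>
}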

internal/uvm/vpmem.go

Lines changed: 22 additions & 22 deletions
@@ -12,6 +12,10 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+const (
+	lcowVPMEMLayerFmt = "/tmp/p%d"
+)
+
 var (
 	// ErrMaxVPMEMLayerSize is the error returned when the size of `hostPath` is
 	// greater than the max vPMEM layer size set at create time.
@@ -50,13 +54,11 @@ func (uvm *UtilityVM) findVPMEMDevice(ctx context.Context, findThisHostPath stri
 	return 0, ErrNotAttached
 }
 
-// AddVPMEM adds a VPMEM disk to a utility VM at the next available location.
-//
-// Returns the location(0..MaxVPMEM-1) where the device is attached, and if exposed,
-// the utility VM path which will be /tmp/p<location>//
-func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string, expose bool) (_ uint32, _ string, err error) {
+// AddVPMEM adds a VPMEM disk to a utility VM at the next available location and
+// returns the UVM path where the layer was mounted.
+func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string) (_ string, err error) {
 	if uvm.operatingSystem != "linux" {
-		return 0, "", errNotSupported
+		return "", errNotSupported
 	}
 
 	uvm.m.Lock()
@@ -68,16 +70,16 @@ func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string, expose bool
 		// We are going to add it so make sure it fits on vPMEM
 		fi, err := os.Stat(hostPath)
 		if err != nil {
-			return 0, "", err
+			return "", err
 		}
 		if uint64(fi.Size()) > uvm.vpmemMaxSizeBytes {
-			return 0, "", ErrMaxVPMEMLayerSize
+			return "", ErrMaxVPMEMLayerSize
 		}
 
 		// It doesn't exist, so we're going to allocate and hot-add it
 		deviceNumber, err = uvm.findNextVPMEM(ctx, hostPath)
 		if err != nil {
-			return 0, "", err
+			return "", err
 		}
 
 		modification := &hcsschema.ModifySettingRequest{
@@ -90,32 +92,30 @@ func (uvm *UtilityVM) AddVPMEM(ctx context.Context, hostPath string, expose bool
 			ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber),
 		}
 
-		uvmPath := fmt.Sprintf("/tmp/p%d", deviceNumber)
-		if expose {
-			modification.GuestRequest = guestrequest.GuestRequest{
-				ResourceType: guestrequest.ResourceTypeVPMemDevice,
-				RequestType:  requesttype.Add,
-				Settings: guestrequest.LCOWMappedVPMemDevice{
-					DeviceNumber: deviceNumber,
-					MountPath:    uvmPath,
-				},
-			}
+		uvmPath := fmt.Sprintf(lcowVPMEMLayerFmt, deviceNumber)
+		modification.GuestRequest = guestrequest.GuestRequest{
+			ResourceType: guestrequest.ResourceTypeVPMemDevice,
+			RequestType:  requesttype.Add,
+			Settings: guestrequest.LCOWMappedVPMemDevice{
+				DeviceNumber: deviceNumber,
+				MountPath:    uvmPath,
+			},
 		}
 
 		if err := uvm.Modify(ctx, modification); err != nil {
-			return 0, "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err)
+			return "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err)
 		}
 
 		uvm.vpmemDevices[deviceNumber] = &vpmemInfo{
 			hostPath: hostPath,
 			uvmPath:  uvmPath,
 			refCount: 1,
 		}
-		return deviceNumber, uvmPath, nil
+		return uvmPath, nil
 	}
 	device := uvm.vpmemDevices[deviceNumber]
 	device.refCount++
-	return deviceNumber, device.uvmPath, nil
+	return device.uvmPath, nil
 }
 
 // RemoveVPMEM removes a VPMEM disk from a Utility VM. If the `hostPath` is not
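With the expose flag and device-number return gone, AddVPMEM always exposes the device in the guest and ref-counts it so multiple containers in the same utility VM can share one attached layer file. A simplified sketch of that ref-counting idea, where the map and the attach helper are illustrative only and not the uvm package's internals:

package main

import "fmt"

// deviceInfo mirrors the idea behind vpmemInfo: one attached backing file can
// be shared by several containers, tracked with a reference count.
type deviceInfo struct {
	uvmPath  string
	refCount int
}

// devices is keyed by host path; slot stands in for the vPMEM device number.
var devices = map[string]*deviceInfo{}

func attach(hostPath string, slot int) (string, error) {
	if d, ok := devices[hostPath]; ok {
		// Already attached: take another reference and reuse the same path.
		d.refCount++
		return d.uvmPath, nil
	}
	devices[hostPath] = &deviceInfo{uvmPath: fmt.Sprintf("/tmp/p%d", slot), refCount: 1}
	return devices[hostPath].uvmPath, nil
}

func main() {
	p1, _ := attach("layer.vhd", 0)
	p2, _ := attach("layer.vhd", 0)
	fmt.Println(p1, p2, devices["layer.vhd"].refCount) // /tmp/p0 /tmp/p0 2
}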
