forked from rancher/os
-
Notifications
You must be signed in to change notification settings - Fork 14
Expand file tree
/
Copy pathpower.go
More file actions
329 lines (286 loc) · 7.93 KB
/
power.go
File metadata and controls
329 lines (286 loc) · 7.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
package power
import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/burmilla/os/cmd/control/install"
	"github.com/burmilla/os/config"
	"github.com/burmilla/os/pkg/docker"
	"github.com/burmilla/os/pkg/log"
	"github.com/burmilla/os/pkg/util"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/container"
	"github.com/docker/engine-api/types/filters"

	"golang.org/x/net/context"
)
// You can't shutdown the system from a process in console because we want to stop the console container.
// If you do that you kill yourself. So we spawn a separate container to do power operations
// We do this because on shutdown we want ssh to gracefully die, terminating ssh connections and not just hanging tcp sessions
//
// Be careful of container name. only [a-zA-Z0-9][a-zA-Z0-9_.-] are allowed
// runDocker re-executes the current command (os.Args) inside a fresh,
// privileged system-docker container and streams that container's logs
// to stdout, then exits this process. It is a no-op (returns nil) when
// already running inside such a container (IN_DOCKER=true).
//
// name selects the container name; when empty, the basename of
// os.Args[0] is used. On success this function never returns: it calls
// os.Exit(0) after the log stream ends.
func runDocker(name string) error {
	// The respawned container sets IN_DOCKER=true; bail out so we don't
	// recurse forever.
	if os.ExpandEnv("${IN_DOCKER}") == "true" {
		return nil
	}

	client, err := docker.NewSystemClient()
	if err != nil {
		return err
	}

	cmd := os.Args
	log.Debugf("runDocker cmd: %s", cmd)
	if name == "" {
		name = filepath.Base(os.Args[0])
	}
	// Container names only allow [a-zA-Z0-9][a-zA-Z0-9_.-], so replace
	// path separators with dashes and drop a leading dash.
	containerName := strings.TrimPrefix(strings.Join(strings.Split(name, "/"), "-"), "-")

	existing, err := client.ContainerInspect(context.Background(), containerName)
	if err == nil && existing.ID != "" {
		// Remove a stale container left over from a previous power command.
		err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{
			ContainerID: existing.ID,
		})
		if err != nil {
			return err
		}
	}

	currentContainerID, err := util.GetCurrentContainerID()
	if err != nil {
		return err
	}

	currentContainer, err := client.ContainerInspect(context.Background(), currentContainerID)
	if err != nil {
		return err
	}

	// Clone our own image/command into a privileged, host-PID container so
	// the power operation outlives the console container being stopped.
	powerContainer, err := client.ContainerCreate(context.Background(),
		&container.Config{
			Image: currentContainer.Config.Image,
			Cmd:   cmd,
			Env: []string{
				"IN_DOCKER=true",
			},
		},
		&container.HostConfig{
			PidMode:     "host",
			NetworkMode: "none",
			VolumesFrom: []string{
				currentContainer.ID,
			},
			Privileged: true,
		}, nil, containerName)
	if err != nil {
		return err
	}

	if err = client.ContainerStart(context.Background(), powerContainer.ID); err != nil {
		return err
	}

	reader, err := client.ContainerLogs(context.Background(), types.ContainerLogsOptions{
		ContainerID: powerContainer.ID,
		ShowStderr:  true,
		ShowStdout:  true,
		Follow:      true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Relay the power container's output until the log stream ends.
	// Buffer is allocated once and reused across reads.
	p := make([]byte, 4096)
	for {
		n, err := reader.Read(p)
		if n > 0 {
			// BUG FIX: print only the n bytes actually read; the original
			// printed the whole 4096-byte buffer including stale/NUL bytes.
			fmt.Print(string(p[:n]))
		}
		if err != nil {
			// io.EOF is the normal end of the stream — don't log it as an
			// error (the original did).
			if err != io.EOF {
				log.Error(err)
			}
			if n == 0 {
				reader.Close()
				break
			}
		}
	}

	os.Exit(0)
	return nil
}
// reboot performs a power operation (reboot or halt, selected by the
// LINUX_REBOOT_CMD_* value in code) after gracefully stopping system
// containers. name is the power command to respawn via runDocker; force
// skips config validation, the docker respawn, and graceful container
// shutdown. Must run as root; never returns on success.
func reboot(name string, force bool, code uint) {
	if os.Geteuid() != 0 {
		log.Fatalf("%s: Need to be root", os.Args[0])
	}

	cfg := config.LoadConfig()

	// Refuse to proceed on an invalid config unless forced, so the user
	// gets a chance to fix it before the system goes down.
	if !force {
		_, validationErrors, err := config.LoadConfigWithError()
		if err != nil {
			log.Fatal(err)
		}
		if validationErrors != nil && !validationErrors.Valid() {
			for _, validationError := range validationErrors.Errors() {
				log.Error(validationError)
			}
			return
		}
	}

	// Clamp the shutdown timeout: default 60s when unset, minimum 5s.
	timeoutValue := cfg.Rancher.ShutdownTimeout
	if timeoutValue == 0 {
		timeoutValue = 60
	}
	if timeoutValue < 5 {
		timeoutValue = 5
	}
	log.Infof("Setting %s timeout to %d (rancher.shutdown_timeout set to %d)", os.Args[0], timeoutValue, cfg.Rancher.ShutdownTimeout)

	// Watchdog: print a dot every 100ms while shutdown proceeds; if the
	// timeout elapses first, sync disks and force the reboot syscall.
	go func() {
		timeout := time.After(time.Duration(timeoutValue) * time.Second)
		// time.NewTicker instead of time.Tick (staticcheck SA1015): the
		// ticker is stoppable, though in practice this goroutine only ends
		// when the process reboots or exits.
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		// Keep trying until we're timed out or got a result or got an error
		for {
			select {
			// Got a timeout! fail with a timeout error
			case <-timeout:
				log.Errorf("Container shutdown taking too long, forcing %s.", os.Args[0])
				syscall.Sync()
				syscall.Reboot(int(code))
			case <-ticker.C:
				fmt.Printf(".")
			}
		}
	}()

	// reboot -f should work even when system-docker is having problems
	if !force {
		if kexecFlag || previouskexecFlag || kexecAppendFlag != "" {
			// pass through the cmdline args
			name = ""
		}
		// Respawn ourselves in a detached system container (see runDocker);
		// on success runDocker does not return.
		if err := runDocker(name); err != nil {
			log.Fatal(err)
		}
	}

	if kexecFlag || previouskexecFlag || kexecAppendFlag != "" {
		// need to mount boot dir, or `system-docker run -v /:/host -w /host/boot` ?
		baseName := "/mnt/new_img"
		_, _, err := install.MountDevice(baseName, "", "", false)
		if err != nil {
			log.Errorf("ERROR: can't Kexec: %s", err)
			return
		}
		defer util.Unmount(baseName)
		Kexec(previouskexecFlag, filepath.Join(baseName, config.BootDir), kexecAppendFlag)
		return
	}

	if !force {
		if err := shutDownContainers(); err != nil {
			log.Error(err)
		}
	}

	// Flush filesystem buffers before handing control to the kernel.
	syscall.Sync()
	if err := syscall.Reboot(int(code)); err != nil {
		log.Fatal(err)
	}
}
// shutDownContainers gracefully stops every running system container
// except the one we are executing in, waits for them to exit, and then
// stops the console container last (so this process's own output and
// ssh sessions survive as long as possible).
//
// It re-parses os.Args for -f/--force (skip shutdown entirely) and
// -t/--timeout (per-container stop timeout in seconds, default 2).
// Returns an aggregated error listing every stop/wait failure.
func shutDownContainers() error {
	var err error
	shutDown := true
	timeout := 2
	for i, arg := range os.Args {
		if arg == "-f" || arg == "--f" || arg == "--force" {
			// Forced power operation: skip the graceful container stop.
			shutDown = false
		}
		if arg == "-t" || arg == "--t" || arg == "--timeout" {
			if len(os.Args) > i+1 {
				t, err := strconv.Atoi(os.Args[i+1])
				if err != nil {
					return err
				}
				timeout = t
			} else {
				log.Error("please specify a timeout")
			}
		}
	}
	if !shutDown {
		return nil
	}

	client, err := docker.NewSystemClient()
	if err != nil {
		return err
	}

	filter := filters.NewArgs()
	filter.Add("status", "running")
	opts := types.ContainerListOptions{
		All:    true,
		Filter: filter,
	}
	containers, err := client.ContainerList(context.Background(), opts)
	if err != nil {
		return err
	}
	currentContainerID, err := util.GetCurrentContainerID()
	if err != nil {
		return err
	}

	// First pass: stop everything except ourselves and the console.
	// Loop variable renamed c: `container` shadowed the imported package.
	var stopErrorStrings []string
	for _, c := range containers {
		if c.ID == currentContainerID {
			continue
		}
		if c.Names[0] == "/console" {
			// Console is handled last, after everything else is down.
			continue
		}
		log.Infof("Stopping %s : %s", c.Names[0], c.ID[:12])
		stopErr := client.ContainerStop(context.Background(), c.ID, timeout)
		if stopErr != nil {
			log.Errorf("------- Error Stopping %s : %s", c.Names[0], stopErr.Error())
			stopErrorStrings = append(stopErrorStrings, " ["+c.ID+"] "+stopErr.Error())
		}
	}

	// lets see what containers are still running and only wait on those
	containers, err = client.ContainerList(context.Background(), opts)
	if err != nil {
		return err
	}

	// BUG FIX: the console index must be tracked against this freshly
	// fetched list only. The original also recorded an index from the
	// first list, which became stale (or out of range) after the re-list
	// whenever the console was absent from the second list.
	consoleContainerIdx := -1
	var waitErrorStrings []string
	for idx, c := range containers {
		if c.ID == currentContainerID {
			continue
		}
		if c.Names[0] == "/console" {
			consoleContainerIdx = idx
			continue
		}
		log.Infof("Waiting %s : %s", c.Names[0], c.ID[:12])
		_, waitErr := client.ContainerWait(context.Background(), c.ID)
		if waitErr != nil {
			log.Errorf("------- Error Waiting %s : %s", c.Names[0], waitErr.Error())
			waitErrorStrings = append(waitErrorStrings, " ["+c.ID+"] "+waitErr.Error())
		}
	}

	// and now stop the console
	if consoleContainerIdx != -1 {
		c := containers[consoleContainerIdx]
		log.Infof("Console Stopping %v : %s", c.Names, c.ID[:12])
		stopErr := client.ContainerStop(context.Background(), c.ID, timeout)
		if stopErr != nil {
			log.Errorf("------- Error Stopping %v : %s", c.Names, stopErr.Error())
			stopErrorStrings = append(stopErrorStrings, " ["+c.ID+"] "+stopErr.Error())
		}
		log.Infof("Console Waiting %v : %s", c.Names, c.ID[:12])
		_, waitErr := client.ContainerWait(context.Background(), c.ID)
		if waitErr != nil {
			log.Errorf("------- Error Waiting %v : %s", c.Names, waitErr.Error())
			waitErrorStrings = append(waitErrorStrings, " ["+c.ID+"] "+waitErr.Error())
		}
	}

	if len(waitErrorStrings) != 0 || len(stopErrorStrings) != 0 {
		return errors.New("error while stopping \n1. STOP Errors [" + strings.Join(stopErrorStrings, ",") + "] \n2. WAIT Errors [" + strings.Join(waitErrorStrings, ",") + "]")
	}
	return nil
}