From 8f3a3b4a10cf5383d8d44c19b23a5ba2e6d4efe6 Mon Sep 17 00:00:00 2001 From: Piotr Kazmierczak <470696+pkazmierczak@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:20:44 +0200 Subject: [PATCH] driver tests --- drivers/docker/driver_linux_test.go | 30 +- drivers/docker/driver_test.go | 848 ++++++++++++++-------------- drivers/docker/driver_unix_test.go | 129 +++-- 3 files changed, 504 insertions(+), 503 deletions(-) diff --git a/drivers/docker/driver_linux_test.go b/drivers/docker/driver_linux_test.go index 2bc03b3956bc..026d2f16ad8c 100644 --- a/drivers/docker/driver_linux_test.go +++ b/drivers/docker/driver_linux_test.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/pointer" tu "github.com/hashicorp/nomad/testutil" - "github.com/stretchr/testify/require" + "github.com/shoenig/test/must" ) func TestDockerDriver_authFromHelper(t *testing.T) { @@ -28,24 +28,24 @@ func TestDockerDriver_authFromHelper(t *testing.T) { helperFile := filepath.Join(dir, "docker-credential-testnomad") err := os.WriteFile(helperFile, helperContent, 0777) - require.NoError(t, err) + must.NoError(t, err) path := os.Getenv("PATH") t.Setenv("PATH", fmt.Sprintf("%s:%s", path, dir)) authHelper := authFromHelper("testnomad") creds, err := authHelper("registry.local:5000/repo/image") - require.NoError(t, err) - require.NotNil(t, creds) - require.Equal(t, "hashi", creds.Username) - require.Equal(t, "nomad", creds.Password) + must.NoError(t, err) + must.NotNil(t, creds) + must.Eq(t, "hashi", creds.Username) + must.Eq(t, "nomad", creds.Password) if _, err := os.Stat(filepath.Join(dir, "helper-get.out")); os.IsNotExist(err) { t.Fatalf("Expected helper-get.out to exist") } content, err := os.ReadFile(filepath.Join(dir, "helper-get.out")) - require.NoError(t, err) - require.Equal(t, "registry.local:5000", string(content)) + must.NoError(t, err) + must.Eq(t, "registry.local:5000", string(content)) } func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { @@ -57,18 +57,18 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { driver.config.PidsLimit = 5 task, cfg, _ := dockerTask(t) - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) cfg.PidsLimit = 7 _, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.Error(t, err) - require.Contains(t, err.Error(), `pids_limit cannot be greater than nomad plugin config pids_limit`) + must.Error(t, err) + must.StrContains(t, err.Error(), `pids_limit cannot be greater than nomad plugin config pids_limit`) // Task PidsLimit should override plugin PidsLimit. 
cfg.PidsLimit = 3 opts, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) - require.Equal(t, pointer.Of(int64(3)), opts.HostConfig.PidsLimit) + must.NoError(t, err) + must.Eq(t, pointer.Of(int64(3)), opts.Host.PidsLimit) } func TestDockerDriver_PidsLimit(t *testing.T) { @@ -80,7 +80,7 @@ func TestDockerDriver_PidsLimit(t *testing.T) { cfg.PidsLimit = 1 cfg.Command = "/bin/sh" cfg.Args = []string{"-c", "sleep 5 & sleep 5 & sleep 5"} - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) _, _, _, cleanup := dockerSetup(t, task, nil) defer cleanup() @@ -98,6 +98,6 @@ func TestDockerDriver_PidsLimit(t *testing.T) { } return true, nil }, func(err error) { - require.NoError(t, err) + must.NoError(t, err) }) } diff --git a/drivers/docker/driver_test.go b/drivers/docker/driver_test.go index ab848d56b818..e8be16a39bfb 100644 --- a/drivers/docker/driver_test.go +++ b/drivers/docker/driver_test.go @@ -17,9 +17,17 @@ import ( "testing" "time" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" containerapi "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/mount" + networkapi "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" "github.com/docker/docker/client" - docker "github.com/fsouza/go-dockerclient" + "github.com/docker/go-connections/nat" hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/numalib" @@ -35,9 +43,8 @@ import ( "github.com/hashicorp/nomad/plugins/drivers" dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" tu "github.com/hashicorp/nomad/testutil" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/shoenig/test/must" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var ( @@ -62,14 +69,14 @@ var ( ) func dockerIsRemote(t *testing.T) bool { - client, err := docker.NewClientFromEnv() + client, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { return false } // Technically this could be a local tcp socket but for testing purposes // we'll just assume that tcp is only used for remote connections. - if client.Endpoint()[0:3] == "tcp" { + if client.DaemonHost()[0:3] == "tcp" { return true } return false @@ -129,7 +136,7 @@ func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) { task.Env["NOMAD_SECRETS_DIR"] = "c:/secrets" } - require.NoError(t, task.EncodeConcreteDriverConfig(&cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(&cfg)) return task, &cfg, ports } @@ -152,12 +159,12 @@ func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]in copyImage(t, task.TaskDir(), "busybox.tar") _, _, err := driver.StartTask(task) - require.NoError(t, err) + must.NoError(t, err) dockerDriver, ok := driver.Impl().(*Driver) - require.True(t, ok) + must.True(t, ok) handle, ok := dockerDriver.tasks.Get(task.ID) - require.True(t, ok) + must.True(t, ok) return client, driver, handle, func() { driver.DestroyTask(task.ID, true) @@ -168,23 +175,22 @@ func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]in // cleanSlate removes the specified docker image, including potentially stopping/removing any // containers based on that image. 
This is used to decouple tests that would be coupled
 // by using the same container image.
-func cleanSlate(client *docker.Client, imageID string) {
-	if img, _ := client.InspectImage(imageID); img == nil {
+func cleanSlate(client *client.Client, imageID string) {
+	ctx := context.Background()
+	if img, _, _ := client.ImageInspectWithRaw(ctx, imageID); img.ID == "" {
 		return
 	}
-	containers, _ := client.ListContainers(docker.ListContainersOptions{
+	containers, _ := client.ContainerList(ctx, containerapi.ListOptions{
 		All: true,
-		Filters: map[string][]string{
-			"ancestor": {imageID},
-		},
+		Filters: filters.NewArgs(filters.KeyValuePair{
+			Key:   "ancestor",
+			Value: imageID,
+		}),
 	})
 	for _, c := range containers {
-		client.RemoveContainer(docker.RemoveContainerOptions{
-			Force: true,
-			ID:    c.ID,
-		})
+		client.ContainerRemove(ctx, c.ID, containerapi.RemoveOptions{Force: true})
 	}
-	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
+	client.ImageRemove(ctx, imageID, image.RemoveOptions{
 		Force: true,
 	})
 	return
@@ -220,9 +226,9 @@ func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.Dr
 		},
 	})
-	require.NoError(t, err)
+	must.NoError(t, err)
 	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
 	if !ok {
 		t.Fatal("plugin instance is not a driver... wat?")
@@ -257,7 +263,7 @@ func TestDockerDriver_Start_Wait(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -265,13 +271,13 @@ func TestDockerDriver_Start_Wait(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case <-waitCh:
@@ -291,7 +297,7 @@ func TestDockerDriver_Start_WaitFinish(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -299,21 +305,21 @@ func TestDockerDriver_Start_WaitFinish(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
-			require.Fail(t, "ExitResult should be successful: %v", res)
+			t.Fatalf("ExitResult should be successful: %v", res)
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		require.Fail(t, "timeout")
+		t.Fatal("timeout")
 	}
 }
@@ -445,7 +451,7 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -453,19 +459,19 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
-			require.Fail(t, "ExitResult should be successful: %v", res)
+			t.Fatalf("ExitResult should be successful: %v", res)
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		require.Fail(t, "timeout")
+		t.Fatal("timeout")
 	}
 	// Check that data was written to the shared alloc directory.
@@ -497,15 +503,15 @@ func TestDockerDriver_Start_NoImage(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, false)
 	defer cleanup()
 	_, _, err := d.StartTask(task)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "image name required")
+	must.Error(t, err)
+	must.StrContains(t, err.Error(), "image name required")
 	d.DestroyTask(task.ID, true)
 }
@@ -528,14 +534,14 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
 	defer cleanup()
 	_, _, err := d.StartTask(task)
-	require.Error(t, err)
+	must.Error(t, err)
 	defer d.DestroyTask(task.ID, true)
@@ -548,7 +554,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 	ci.Parallel(t)
 	// This test requires that the alloc dir be mounted into docker as a volume.
 	// Because this cannot happen when docker is run remotely, e.g. when running
 	// docker in a VM, we skip this when we detect Docker is being run remotely.
 	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
@@ -570,7 +576,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -578,21 +584,21 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
-			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
+			t.Fatalf("ExitResult should be successful: %v", res)
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		require.Fail(t, "timeout")
+		t.Fatal("timeout")
 	}
 	// Check that data was written to the shared alloc directory.
@@ -618,7 +624,7 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -626,7 +632,7 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
@@ -636,20 +642,20 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
 		if runtime.GOOS == "windows" {
 			signal = "SIGKILL"
 		}
-		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
+		must.NoError(t, d.StopTask(task.ID, time.Second, signal))
 	}(t)
 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if res.Successful() {
-			require.Fail(t, "ExitResult should err: %v", res)
+			t.Fatalf("ExitResult should err: %v", res)
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		require.Fail(t, "timeout")
+		t.Fatal("timeout")
 	}
 }
@@ -669,7 +675,7 @@ func TestDockerDriver_Start_KillTimeout(t *testing.T) {
 		AllocID:   uuid.Generate(),
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -677,7 +683,7 @@ func TestDockerDriver_Start_KillTimeout(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer d.DestroyTask(task.ID, true)
@@ -685,22 +691,22 @@ func TestDockerDriver_Start_KillTimeout(t *testing.T) {
 	go func() {
 		time.Sleep(100 * time.Millisecond)
 		killSent = time.Now()
-		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
+		must.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
 	}()
 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	var killed time.Time
 	select {
 	case <-waitCh:
 		killed = time.Now()
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		require.Fail(t, "timeout")
+		t.Fatal("timeout")
 	}
-	require.True(t, killed.Sub(killSent) > timeout)
+	must.True(t, killed.Sub(killSent) > timeout)
 }
 func TestDockerDriver_StartN(t *testing.T) {
@@ -709,12 +715,9 @@ func TestDockerDriver_StartN(t *testing.T) {
 		t.Skip("Windows Docker does not support SIGINT")
 	}
 	testutil.DockerCompatible(t)
-	require := require.New(t)
 	task1, _, _ := dockerTask(t)
-
 	task2, _, _ := dockerTask(t)
-
 	task3, _, _ := dockerTask(t)
 	taskList := []*drivers.TaskConfig{task1, task2, task3}
@@ -728,7 +731,7 @@ func TestDockerDriver_StartN(t *testing.T) {
 		defer cleanup()
 		copyImage(t, task.TaskDir(), "busybox.tar")
 		_, _, err := d.StartTask(task)
-		require.NoError(err)
+		must.NoError(t, err)
 	}
@@ -738,16 +741,16 @@ func TestDockerDriver_StartN(t *testing.T) {
 	t.Log("All tasks are started. 
Terminating...") for _, task := range taskList { - require.NoError(d.StopTask(task.ID, time.Second, "SIGINT")) + must.NoError(t, d.StopTask(task.ID, time.Second, "SIGINT")) // Attempt to wait waitCh, err := d.WaitTask(context.Background(), task.ID) - require.NoError(err) + must.NoError(t, err) select { case <-waitCh: case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): - require.Fail("timeout waiting on task") + t.Fatal("timeout waiting on task") } } @@ -760,28 +763,27 @@ func TestDockerDriver_StartNVersions(t *testing.T) { t.Skip("Skipped on windows, we don't have image variants available") } testutil.DockerCompatible(t) - require := require.New(t) task1, cfg1, _ := dockerTask(t) tcfg1 := newTaskConfig("", []string{"echo", "hello"}) cfg1.Image = tcfg1.Image cfg1.LoadImage = tcfg1.LoadImage - require.NoError(task1.EncodeConcreteDriverConfig(cfg1)) + must.NoError(t, task1.EncodeConcreteDriverConfig(cfg1)) task2, cfg2, _ := dockerTask(t) tcfg2 := newTaskConfig("musl", []string{"echo", "hello"}) cfg2.Image = tcfg2.Image cfg2.LoadImage = tcfg2.LoadImage - require.NoError(task2.EncodeConcreteDriverConfig(cfg2)) + must.NoError(t, task2.EncodeConcreteDriverConfig(cfg2)) task3, cfg3, _ := dockerTask(t) tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"}) cfg3.Image = tcfg3.Image cfg3.LoadImage = tcfg3.LoadImage - require.NoError(task3.EncodeConcreteDriverConfig(cfg3)) + must.NoError(t, task3.EncodeConcreteDriverConfig(cfg3)) taskList := []*drivers.TaskConfig{task1, task2, task3} @@ -796,9 +798,9 @@ func TestDockerDriver_StartNVersions(t *testing.T) { copyImage(t, task.TaskDir(), "busybox_musl.tar") copyImage(t, task.TaskDir(), "busybox_glibc.tar") _, _, err := d.StartTask(task) - require.NoError(err) + must.NoError(t, err) - require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) } defer d.DestroyTask(task3.ID, true) @@ -807,16 +809,16 @@ func TestDockerDriver_StartNVersions(t *testing.T) { t.Log("All tasks are started. 
Terminating...") for _, task := range taskList { - require.NoError(d.StopTask(task.ID, time.Second, "SIGINT")) + must.NoError(t, d.StopTask(task.ID, time.Second, "SIGINT")) // Attempt to wait waitCh, err := d.WaitTask(context.Background(), task.ID) - require.NoError(err) + must.NoError(t, err) select { case <-waitCh: case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): - require.Fail("timeout waiting on task") + t.Fatal("timeout waiting on task") } } @@ -833,21 +835,21 @@ func TestDockerDriver_Labels(t *testing.T) { "label1": "value1", "label2": "value2", } - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) if err != nil { t.Fatalf("err: %v", err) } // expect to see 1 additional standard labels (allocID) - require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels)) + must.Eq(t, len(cfg.Labels)+1, len(container.Config.Labels)) for k, v := range cfg.Labels { - require.Equal(t, v, container.Config.Labels[k]) + must.Eq(t, v, container.Config.Labels[k]) } } @@ -857,16 +859,16 @@ func TestDockerDriver_ExtraLabels(t *testing.T) { task, cfg, _ := dockerTask(t) - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) dockerClientConfig := make(map[string]interface{}) dockerClientConfig["extra_labels"] = []string{"task*", "job_name"} client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) if err != nil { t.Fatalf("err: %v", err) } @@ -879,9 +881,9 @@ func TestDockerDriver_ExtraLabels(t *testing.T) { } // expect to see 4 labels (allocID by default, task_name and task_group_name due to task*, and job_name) - require.Equal(t, 4, len(container.Config.Labels)) + must.Eq(t, 4, len(container.Config.Labels)) for k, v := range expectedLabels { - require.Equal(t, v, container.Config.Labels[k]) + must.Eq(t, v, container.Config.Labels[k]) } } @@ -891,7 +893,7 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) { task, cfg, _ := dockerTask(t) - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) dockerClientConfig := make(map[string]interface{}) loggerConfig := map[string]string{"gelf-address": "udp://1.2.3.4:12201", "tag": "gelf"} @@ -902,13 +904,13 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) { } client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) - require.Equal(t, "gelf", container.HostConfig.LogConfig.Type) - require.Equal(t, loggerConfig, container.HostConfig.LogConfig.Config) + 
must.Eq(t, "gelf", container.HostConfig.LogConfig.Type) + must.Eq(t, loggerConfig, container.HostConfig.LogConfig.Config) } // TestDockerDriver_LogCollectionDisabled ensures that logmon isn't configured @@ -934,7 +936,7 @@ func TestDockerDriver_LogCollectionDisabled(t *testing.T) { client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig) t.Cleanup(cleanup) must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) must.NoError(t, err) must.Nil(t, handle.dlogger) @@ -955,7 +957,7 @@ func TestDockerDriver_HealthchecksDisable(t *testing.T) { defer cleanup() must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) must.NoError(t, err) must.NotNil(t, container.Config.Healthcheck) @@ -969,17 +971,15 @@ func TestDockerDriver_ForcePull(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.ForcePull = true - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - _, err := client.InspectContainer(handle.containerID) - if err != nil { - t.Fatalf("err: %v", err) - } + _, err := client.ContainerInspect(context.Background(), handle.containerID) + must.Nil(t, err) } func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { @@ -997,15 +997,15 @@ func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { cfg.ForcePull = true cfg.Command = busyboxLongRunningCmd[0] cfg.Args = busyboxLongRunningCmd[1:] - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) - require.Equal(t, localDigest, container.Image) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) + must.Eq(t, localDigest, container.Image) } func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { @@ -1018,18 +1018,18 @@ func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.SecurityOpt = []string{"seccomp=unconfined"} - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) if err != nil { t.Fatalf("err: %v", err) } - require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt) + must.Eq(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt) } func TestDockerDriver_SecurityOptFromFile(t *testing.T) { @@ -1042,16 +1042,16 @@ func TestDockerDriver_SecurityOptFromFile(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.SecurityOpt = 
[]string{"seccomp=./test-resources/docker/seccomp.json"}
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 	client, d, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
-	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
-	require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot")
+	must.StrContains(t, container.HostConfig.SecurityOpt[0], "reboot")
 }
 func TestDockerDriver_Runtime(t *testing.T) {
@@ -1061,18 +1061,16 @@ func TestDockerDriver_Runtime(t *testing.T) {
 	task, cfg, _ := dockerTask(t)
 	cfg.Runtime = "runc"
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 	client, d, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
-	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
-	container, err := client.InspectContainer(handle.containerID)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
-	require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime)
+	must.Eq(t, cfg.Runtime, container.HostConfig.Runtime)
 }
 func TestDockerDriver_CreateContainerConfig(t *testing.T) {
@@ -1083,20 +1081,20 @@ func TestDockerDriver_CreateContainerConfig(t *testing.T) {
 	opt := map[string]string{"size": "120G"}
 	cfg.StorageOpt = opt
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 	dh := dockerDriverHarness(t, nil)
 	driver := dh.Impl().(*Driver)
 	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
-	require.NoError(t, err)
+	must.NoError(t, err)
-	require.Equal(t, "org/repo:0.1", c.Config.Image)
-	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
+	must.Eq(t, "org/repo:0.1", c.Config.Image)
+	must.Eq(t, opt, c.Host.StorageOpt)
 	// Container name should be /- for backward compat
 	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
-	require.Equal(t, containerName, c.Name)
+	must.Eq(t, containerName, c.Name)
 }
 func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
@@ -1106,7 +1104,7 @@ func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
 	task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 	dh := dockerDriverHarness(t, nil)
 	driver := dh.Impl().(*Driver)
@@ -1115,13 +1113,13 @@ func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
 	// Should error if a runtime was explicitly set that doesn't match gpu runtime
 	cfg.Runtime = "nvidia"
 	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
-	require.NoError(t, err)
-	require.Equal(t, "nvidia", c.HostConfig.Runtime)
+	must.NoError(t, err)
+	must.Eq(t, "nvidia", c.Host.Runtime)
 	cfg.Runtime = "custom"
 	_, err = driver.createContainerConfig(task, cfg, "org/repo:0.1")
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "conflicting runtime requests")
+	must.Error(t, err)
+	must.StrContains(t, err.Error(), "conflicting runtime requests")
 }
 func 
TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { @@ -1143,22 +1141,22 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { task, cfg, _ := dockerTask(t) - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) for _, runtime := range allowRuntime { t.Run(runtime, func(t *testing.T) { cfg.Runtime = runtime c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) - require.Equal(t, runtime, c.HostConfig.Runtime) + must.NoError(t, err) + must.Eq(t, runtime, c.Host.Runtime) }) } t.Run("not allowed: denied", func(t *testing.T) { cfg.Runtime = "denied" _, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.Error(t, err) - require.Contains(t, err.Error(), `runtime "denied" is not allowed`) + must.Error(t, err) + must.StrContains(t, err.Error(), `runtime "denied" is not allowed`) }) } @@ -1170,15 +1168,15 @@ func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { task.User = "random-user-1" - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) + must.NoError(t, err) - require.Equal(t, task.User, c.Config.User) + must.Eq(t, task.User, c.Config.User) } func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { @@ -1197,13 +1195,13 @@ func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { "com.hashicorp.nomad.alloc_id": "bad_value", } - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) + must.NoError(t, err) expectedLabels := map[string]string{ // user provided labels @@ -1212,7 +1210,7 @@ func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { "com.hashicorp.nomad.alloc_id": task.AllocID, } - require.Equal(t, expectedLabels, c.Config.Labels) + must.Eq(t, expectedLabels, c.Config.Labels) } func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { @@ -1278,17 +1276,17 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.Logging = c.loggingConfig - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) + must.NoError(t, err) - require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type) - require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"]) - require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"]) + must.Eq(t, c.expectedConfig.Type, cc.Host.LogConfig.Type) + must.Eq(t, c.expectedConfig.Config["max-file"], cc.Host.LogConfig.Config["max-file"]) + must.Eq(t, c.expectedConfig.Config["max-size"], cc.Host.LogConfig.Config["max-size"]) }) } } @@ -1335,32 +1333,32 @@ func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") must.NoError(t, err) - must.Eq(t, []docker.HostMount{ + must.Eq(t, []mount.Mount{ // from mount map { Type: 
"bind", Target: "/map-bind-target", Source: "/map-source", - BindOptions: &docker.BindOptions{}, + BindOptions: &mount.BindOptions{}, }, { - Type: "tmpfs", - Target: "/map-tmpfs-target", - TempfsOptions: &docker.TempfsOptions{}, + Type: "tmpfs", + Target: "/map-tmpfs-target", + TmpfsOptions: &mount.TmpfsOptions{}, }, // from mount list { Type: "bind", Target: "/list-bind-target", Source: "/list-source", - BindOptions: &docker.BindOptions{}, + BindOptions: &mount.BindOptions{}, }, { - Type: "tmpfs", - Target: "/list-tmpfs-target", - TempfsOptions: &docker.TempfsOptions{}, + Type: "tmpfs", + Target: "/list-tmpfs-target", + TmpfsOptions: &mount.TmpfsOptions{}, }, - }, cc.HostConfig.Mounts) + }, cc.Host.Mounts) must.Eq(t, []string{ "alloc:/alloc:z", @@ -1368,7 +1366,7 @@ func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { "redis-demo/secrets:/secrets:z", "/etc/ssl/certs:/etc/ssl/certs:ro,z", "/var/www:/srv/www:z", - }, cc.HostConfig.Binds) + }, cc.Host.Binds) } func TestDockerDriver_CreateContainerConfig_Mounts_Windows(t *testing.T) { @@ -1412,32 +1410,32 @@ func TestDockerDriver_CreateContainerConfig_Mounts_Windows(t *testing.T) { cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") must.NoError(t, err) - must.Eq(t, []docker.HostMount{ + must.Eq(t, []mount.Mount{ // from mount map { Type: "bind", Target: "/map-bind-target", Source: "redis-demo\\map-source", - BindOptions: &docker.BindOptions{}, + BindOptions: &mount.BindOptions{}, }, { - Type: "tmpfs", - Target: "/map-tmpfs-target", - TempfsOptions: &docker.TempfsOptions{}, + Type: "tmpfs", + Target: "/map-tmpfs-target", + TmpfsOptions: &mount.TmpfsOptions{}, }, // from mount list { Type: "bind", Target: "/list-bind-target", Source: "redis-demo\\list-source", - BindOptions: &docker.BindOptions{}, + BindOptions: &mount.BindOptions{}, }, { - Type: "tmpfs", - Target: "/list-tmpfs-target", - TempfsOptions: &docker.TempfsOptions{}, + Type: "tmpfs", + Target: "/list-tmpfs-target", + TmpfsOptions: &mount.TmpfsOptions{}, }, - }, cc.HostConfig.Mounts) + }, cc.Host.Mounts) must.Eq(t, []string{ `alloc:c:/alloc`, @@ -1445,7 +1443,7 @@ func TestDockerDriver_CreateContainerConfig_Mounts_Windows(t *testing.T) { `redis-demo\secrets:c:/secrets`, `c:\etc\ssl\certs:c:/etc/ssl/certs`, `c:\var\www:c:/srv/www`, - }, cc.HostConfig.Binds) + }, cc.Host.Binds) } func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { @@ -1503,14 +1501,14 @@ func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") if testCase.expectToReturnError { - require.NotNil(t, err) + must.NotNil(t, err) } else { - require.NoError(t, err) + must.NoError(t, err) if testCase.nvidiaDevicesProvided { - require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime) + must.Eq(t, testCase.expectedRuntime, c.Host.Runtime) } else { // no nvidia devices provided -> no point to use nvidia runtime - require.Equal(t, "", c.HostConfig.Runtime) + must.Eq(t, "", c.Host.Runtime) } } }) @@ -1586,11 +1584,11 @@ func TestDockerDriver_Capabilities(t *testing.T) { if len(tc.CapDrop) > 0 { cfg.CapDrop = tc.CapDrop } - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) d := dockerDriverHarness(t, nil) dockerDriver, ok := d.Impl().(*Driver) - require.True(t, ok) + must.True(t, ok) if tc.Allowlist != "" { dockerDriver.config.AllowCaps = strings.Split(tc.Allowlist, ",") } @@ -1605,23 +1603,23 @@ func TestDockerDriver_Capabilities(t 
*testing.T) { t.Fatalf("Expected error in start: %v", tc.StartError) } else if err != nil { if tc.StartError == "" { - require.NoError(t, err) + must.NoError(t, err) } else { - require.Contains(t, err.Error(), tc.StartError) + must.StrContains(t, err.Error(), tc.StartError) } return } handle, ok := dockerDriver.tasks.Get(task.ID) - require.True(t, ok) + must.True(t, ok) - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) - require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd) - require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop) + must.Eq(t, tc.CapAdd, container.HostConfig.CapAdd) + must.Eq(t, tc.CapDrop, container.HostConfig.CapDrop) }) } } @@ -1658,12 +1656,12 @@ func TestDockerDriver_DNS(t *testing.T) { task, cfg, _ := dockerTask(t) task.DNS = c.cfg - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) _, d, _, cleanup := dockerSetup(t, task, nil) t.Cleanup(cleanup) - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) t.Cleanup(func() { _ = d.DestroyTask(task.ID, true) }) dtestutil.TestTaskDNSConfig(t, d, task.ID, c.cfg) @@ -1681,16 +1679,16 @@ func TestDockerDriver_Init(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.Init = true - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) - require.Equal(t, cfg.Init, container.HostConfig.Init) + must.Eq(t, cfg.Init, *container.HostConfig.Init) } func TestDockerDriver_CPUSetCPUs(t *testing.T) { @@ -1729,10 +1727,10 @@ func TestDockerDriver_CPUSetCPUs(t *testing.T) { defer cleanup() must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) + container, err := client.ContainerInspect(context.Background(), handle.containerID) must.NoError(t, err) - must.Eq(t, cfg.CPUSetCPUs, container.HostConfig.CPUSetCPUs) + must.Eq(t, cfg.CPUSetCPUs, container.HostConfig.Resources.CpusetCpus) }) } } @@ -1747,17 +1745,17 @@ func TestDockerDriver_MemoryHardLimit(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.MemoryHardLimit = 300 - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) - require.Equal(t, task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation) - require.Equal(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory) + must.Eq(t, 
task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation) + must.Eq(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory) } func TestDockerDriver_MACAddress(t *testing.T) { @@ -1770,16 +1768,16 @@ func TestDockerDriver_MACAddress(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.MacAddress = "00:16:3e:00:00:00" - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) - require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress) + must.Eq(t, cfg.MacAddress, container.NetworkSettings.MacAddress) } func TestDockerWorkDir(t *testing.T) { @@ -1789,15 +1787,15 @@ func TestDockerWorkDir(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.WorkDir = "/some/path" - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) - require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir)) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) + must.Eq(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir)) } func inSlice(needle string, haystack []string) bool { @@ -1819,20 +1817,20 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) // Verify that the correct ports are EXPOSED - expectedExposedPorts := map[docker.Port]struct{}{ - docker.Port(fmt.Sprintf("%d/tcp", res)): {}, - docker.Port(fmt.Sprintf("%d/udp", res)): {}, - docker.Port(fmt.Sprintf("%d/tcp", dyn)): {}, - docker.Port(fmt.Sprintf("%d/udp", dyn)): {}, + expectedExposedPorts := map[nat.Port]struct{}{ + nat.Port(fmt.Sprintf("%d/tcp", res)): {}, + nat.Port(fmt.Sprintf("%d/udp", res)): {}, + nat.Port(fmt.Sprintf("%d/tcp", dyn)): {}, + nat.Port(fmt.Sprintf("%d/udp", dyn)): {}, } - require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts) + must.Eq(t, expectedExposedPorts, container.Config.ExposedPorts) hostIP := "127.0.0.1" if runtime.GOOS == "windows" { @@ -1840,14 +1838,14 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { } // Verify that the correct ports are FORWARDED - expectedPortBindings := map[docker.Port][]docker.PortBinding{ - docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, - 
docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + expectedPortBindings := map[nat.Port][]nat.PortBinding{ + nat.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + nat.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, } - require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings) + must.Eq(t, expectedPortBindings, container.HostConfig.PortBindings) } func TestDockerDriver_PortsMapping(t *testing.T) { @@ -1861,28 +1859,28 @@ func TestDockerDriver_PortsMapping(t *testing.T) { "main": 8080, "REDIS": 6379, } - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() - require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) + must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) - container, err := client.InspectContainer(handle.containerID) - require.NoError(t, err) + container, err := client.ContainerInspect(context.Background(), handle.containerID) + must.NoError(t, err) // Verify that the port environment variables are set - require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080") - require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379") + must.SliceContains(t, container.Config.Env, "NOMAD_PORT_main=8080") + must.SliceContains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379") // Verify that the correct ports are EXPOSED - expectedExposedPorts := map[docker.Port]struct{}{ - docker.Port("8080/tcp"): {}, - docker.Port("8080/udp"): {}, - docker.Port("6379/tcp"): {}, - docker.Port("6379/udp"): {}, + expectedExposedPorts := map[nat.Port]struct{}{ + nat.Port("8080/tcp"): {}, + nat.Port("8080/udp"): {}, + nat.Port("6379/tcp"): {}, + nat.Port("6379/udp"): {}, } - require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts) + must.Eq(t, expectedExposedPorts, container.Config.ExposedPorts) hostIP := "127.0.0.1" if runtime.GOOS == "windows" { @@ -1890,13 +1888,13 @@ func TestDockerDriver_PortsMapping(t *testing.T) { } // Verify that the correct ports are FORWARDED - expectedPortBindings := map[docker.Port][]docker.PortBinding{ - docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, - docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + expectedPortBindings := map[nat.Port][]nat.PortBinding{ + nat.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + nat.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, } - require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings) + must.Eq(t, expectedPortBindings, container.HostConfig.PortBindings) } func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { @@ -1927,18 +1925,18 @@ func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { driver := dh.Impl().(*Driver) c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - 
require.NoError(t, err) + must.NoError(t, err) - require.Equal(t, "org/repo:0.1", c.Config.Image) + must.Eq(t, "org/repo:0.1", c.Config.Image) // Verify that the correct ports are FORWARDED - expectedPortBindings := map[docker.Port][]docker.PortBinding{ - docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}}, - docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}}, - docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}}, - docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}}, + expectedPortBindings := map[nat.Port][]nat.PortBinding{ + nat.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}}, + nat.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}}, + nat.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}}, + nat.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}}, } - require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings) + must.Eq(t, expectedPortBindings, c.Host.PortBindings) } func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { @@ -1955,24 +1953,24 @@ func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { driver := dh.Impl().(*Driver) c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") - require.NoError(t, err) + must.NoError(t, err) - require.Equal(t, "org/repo:0.1", c.Config.Image) - require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080") - require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379") + must.Eq(t, "org/repo:0.1", c.Config.Image) + must.SliceContains(t, c.Config.Env, "NOMAD_PORT_main=8080") + must.SliceContains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379") // Verify that the correct ports are FORWARDED hostIP := "127.0.0.1" if runtime.GOOS == "windows" { hostIP = "" } - expectedPortBindings := map[docker.Port][]docker.PortBinding{ - docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, - docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, - docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + expectedPortBindings := map[nat.Port][]nat.PortBinding{ + nat.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, + nat.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, + nat.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, } - require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings) + must.Eq(t, expectedPortBindings, c.Host.PortBindings) } @@ -1983,13 +1981,13 @@ func TestDockerDriver_CleanupContainer(t *testing.T) { task, cfg, _ := dockerTask(t) cfg.Command = "echo" cfg.Args = []string{"hello"} - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client, d, handle, cleanup := dockerSetup(t, task, nil) defer cleanup() waitCh, err := d.WaitTask(context.Background(), task.ID) - require.NoError(t, err) + must.NoError(t, err) select { case res := <-waitCh: @@ -1998,12 +1996,12 @@ func TestDockerDriver_CleanupContainer(t *testing.T) { } err = d.DestroyTask(task.ID, false) - require.NoError(t, err) + must.NoError(t, err) time.Sleep(3 * time.Second) // Ensure that the container isn't present - _, err := client.InspectContainer(handle.containerID) + 
_, err := client.ContainerInspect(context.Background(), handle.containerID) if err == nil { t.Fatalf("expected to not get container") } @@ -2016,11 +2014,12 @@ func TestDockerDriver_CleanupContainer(t *testing.T) { func TestDockerDriver_EnableImageGC(t *testing.T) { ci.Parallel(t) testutil.DockerCompatible(t) + ctx := context.Background() task, cfg, _ := dockerTask(t) cfg.Command = "echo" cfg.Args = []string{"hello"} - require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) + must.NoError(t, task.EncodeConcreteDriverConfig(cfg)) client := newTestDockerClient(t) driver := dockerDriverHarness(t, map[string]interface{}{ @@ -2037,15 +2036,15 @@ func TestDockerDriver_EnableImageGC(t *testing.T) { copyImage(t, task.TaskDir(), "busybox.tar") _, _, err := driver.StartTask(task) - require.NoError(t, err) + must.NoError(t, err) dockerDriver, ok := driver.Impl().(*Driver) - require.True(t, ok) + must.True(t, ok) _, ok = dockerDriver.tasks.Get(task.ID) - require.True(t, ok) + must.True(t, ok) - waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID) - require.NoError(t, err) + waitCh, err := dockerDriver.WaitTask(ctx, task.ID) + must.NoError(t, err) select { case res := <-waitCh: if !res.Successful() { @@ -2057,25 +2056,25 @@ func TestDockerDriver_EnableImageGC(t *testing.T) { } // we haven't called DestroyTask, image should be present - _, err = client.InspectImage(cfg.Image) - require.NoError(t, err) + _, _, err = client.ImageInspectWithRaw(ctx, cfg.Image) + must.NoError(t, err) err = dockerDriver.DestroyTask(task.ID, false) - require.NoError(t, err) + must.NoError(t, err) // image_delay is 3s, so image should still be around for a bit - _, err = client.InspectImage(cfg.Image) - require.NoError(t, err) + _, _, err = client.ImageInspectWithRaw(ctx, cfg.Image) + must.NoError(t, err) // Ensure image was removed tu.WaitForResult(func() (bool, error) { - if _, err := client.InspectImage(cfg.Image); err == nil { + if _, _, err := client.ImageInspectWithRaw(ctx, cfg.Image); err == nil { return false, fmt.Errorf("image exists but should have been removed. 
Does another %v container exist?", cfg.Image)
		}
		return true, nil
	}, func(err error) {
-		require.NoError(t, err)
+		must.NoError(t, err)
	})
}

@@ -2083,10 +2082,12 @@ func TestDockerDriver_DisableImageGC(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
+	ctx := context.Background()
+
 	task, cfg, _ := dockerTask(t)
 	cfg.Command = "echo"
 	cfg.Args = []string{"hello"}
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client := newTestDockerClient(t)
 	driver := dockerDriverHarness(t, map[string]interface{}{
@@ -2103,15 +2104,15 @@ func TestDockerDriver_DisableImageGC(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 
 	_, _, err := driver.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	dockerDriver, ok := driver.Impl().(*Driver)
-	require.True(t, ok)
+	must.True(t, ok)
 	handle, ok := dockerDriver.tasks.Get(task.ID)
-	require.True(t, ok)
+	must.True(t, ok)
 
-	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	waitCh, err := dockerDriver.WaitTask(ctx, task.ID)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
@@ -2123,21 +2124,21 @@ func TestDockerDriver_DisableImageGC(t *testing.T) {
 	}
 
 	// we haven't called DestroyTask, image should be present
-	_, err = client.InspectImage(handle.containerImage)
-	require.NoError(t, err)
+	_, _, err = client.ImageInspectWithRaw(ctx, handle.containerImage)
+	must.NoError(t, err)
 
 	err = dockerDriver.DestroyTask(task.ID, false)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	// image_delay is 1s, wait a little longer
 	time.Sleep(3 * time.Second)
 
 	// image should not have been removed or scheduled to be removed
-	_, err = client.InspectImage(cfg.Image)
-	require.NoError(t, err)
+	_, _, err = client.ImageInspectWithRaw(ctx, cfg.Image)
+	must.NoError(t, err)
 	dockerDriver.coordinator.imageLock.Lock()
 	_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
-	require.False(t, ok, "image should not be registered for deletion")
+	must.False(t, ok, must.Sprint("image should not be registered for deletion"))
 	dockerDriver.coordinator.imageLock.Unlock()
 }
 
@@ -2145,11 +2146,13 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
+	ctx := context.Background()
+
 	task, cfg, _ := dockerTask(t)
 	cfg.Command = "echo"
 	cfg.Args = []string{"hello"}
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client := newTestDockerClient(t)
 	driver := dockerDriverHarness(t, map[string]interface{}{
@@ -2166,15 +2169,15 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 
 	_, _, err := driver.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	dockerDriver, ok := driver.Impl().(*Driver)
-	require.True(t, ok)
+	must.True(t, ok)
 	h, ok := dockerDriver.tasks.Get(task.ID)
-	require.True(t, ok)
+	must.True(t, ok)
 
 	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
@@ -2186,26 +2189,24 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
 	}
 
 	// remove the container out-of-band
-	require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
-		ID: h.containerID,
-	}))
+	must.NoError(t, client.ContainerRemove(ctx, h.containerID, containerapi.RemoveOptions{}))
 
-	require.NoError(t, dockerDriver.DestroyTask(task.ID, false))
+	must.NoError(t, dockerDriver.DestroyTask(task.ID, false))
 
 	// Ensure image was removed
 	tu.WaitForResult(func() (bool, error) {
-		if _, err := client.InspectImage(cfg.Image); err == nil {
+		if _, _, err := client.ImageInspectWithRaw(ctx, cfg.Image); err == nil {
 			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
 		}
 
 		return true, nil
 	}, func(err error) {
-		require.NoError(t, err)
+		must.NoError(t, err)
 	})
 
 	// Ensure that task handle was removed
 	_, ok = dockerDriver.tasks.Get(task.ID)
-	require.False(t, ok)
+	must.False(t, ok)
 }
 
 func TestDockerDriver_Stats(t *testing.T) {
@@ -2216,28 +2217,28 @@
 	cfg.Command = "sleep"
 	cfg.Args = []string{"1000"}
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	_, d, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
-	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
 
 	go func() {
 		defer d.DestroyTask(task.ID, true)
 		ctx, cancel := context.WithCancel(context.Background())
 		defer cancel()
 		ch, err := handle.Stats(ctx, 1*time.Second, top.Compute())
-		assert.NoError(t, err)
+		must.NoError(t, err)
 		select {
 		case ru := <-ch:
-			assert.NotNil(t, ru.ResourceUsage)
+			must.NotNil(t, ru.ResourceUsage)
 		case <-time.After(3 * time.Second):
-			assert.Fail(t, "stats timeout")
+			t.Fatal("stats timeout")
 		}
 	}()
 
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if res.Successful() {
@@ -2271,7 +2272,7 @@ func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath strin
 		Env:       map[string]string{"VOL_PATH": containerPath},
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
 
 	d := dockerDriverHarness(t, cfg)
 	cleanup := d.MkAllocDir(task, true)
@@ -2303,7 +2304,7 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
 		_, _, err := driver.StartTask(task)
 		defer driver.DestroyTask(task.ID, true)
 		if err == nil {
-			require.Fail(t, "Started driver successfully when volumes should have been disabled.")
+			t.Fatal("Started driver successfully when volumes should have been disabled.")
 		}
 	}
 
@@ -2313,11 +2314,11 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
 		defer cleanup()
 
 		_, _, err := driver.StartTask(task)
-		require.NoError(t, err)
+		must.NoError(t, err)
 		defer driver.DestroyTask(task.ID, true)
 
 		waitCh, err := driver.WaitTask(context.Background(), task.ID)
-		require.NoError(t, err)
+		must.NoError(t, err)
 		select {
 		case res := <-waitCh:
 			if !res.Successful() {
@@ -2338,12 +2339,12 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
 		defer cleanup()
 
 		taskCfg.VolumeDriver = "flocker"
-		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
+		must.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
 
 		_, _, err := driver.StartTask(task)
		defer driver.DestroyTask(task.ID, true)
 		if err == nil {
-			require.Fail(t, "Started driver successfully when volume drivers should have been disabled.")
+			t.Fatal("Started driver successfully when volume drivers should have been disabled.")
 		}
 	}
 }
@@ -2365,17 +2366,17 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) {
 
 	// Evaluate symlinks so it works on MacOS
 	tmpvol, err := filepath.EvalSymlinks(tmpvol)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol)
 	defer cleanup()
 
 	_, _, err = driver.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	defer driver.DestroyTask(task.ID, true)
 
 	waitCh, err := driver.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
@@ -2439,7 +2440,7 @@ func TestDockerDriver_Mounts(t *testing.T) {
 			cfg.Command = "sleep"
 			cfg.Args = []string{"10000"}
 			cfg.Mounts = c.Mounts
-			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+			must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 			cleanup := d.MkAllocDir(task, true)
 			defer cleanup()
@@ -2467,7 +2468,7 @@ func TestDockerDriver_AuthConfiguration(t *testing.T) {
 	path := "./test-resources/docker/auth.json"
 	cases := []struct {
 		Repo       string
-		AuthConfig *docker.AuthConfiguration
+		AuthConfig *registry.AuthConfig
 	}{
 		{
 			Repo: "lolwhat.com/what:1337",
@@ -2475,7 +2476,7 @@
 		},
 		{
 			Repo: "redis:7",
-			AuthConfig: &docker.AuthConfiguration{
+			AuthConfig: &registry.AuthConfig{
 				Username: "test",
 				Password: "1234",
 				Email:    "",
@@ -2484,7 +2485,7 @@
 		},
 		{
 			Repo: "quay.io/redis:7",
-			AuthConfig: &docker.AuthConfiguration{
+			AuthConfig: &registry.AuthConfig{
 				Username: "test",
 				Password: "5678",
 				Email:    "",
@@ -2493,7 +2494,7 @@
 		},
 		{
 			Repo: "other.io/redis:7",
-			AuthConfig: &docker.AuthConfiguration{
+			AuthConfig: &registry.AuthConfig{
 				Username: "test",
 				Password: "abcd",
 				Email:    "",
@@ -2504,8 +2505,8 @@
 
 	for _, c := range cases {
 		act, err := authFromDockerConfig(path)(c.Repo)
-		require.NoError(t, err)
-		require.Exactly(t, c.AuthConfig, act)
+		must.NoError(t, err)
+		must.Eq(t, c.AuthConfig, act)
 	}
 }
 
@@ -2514,7 +2515,7 @@ func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
 	cases := []struct {
 		Auth       DockerAuth
-		AuthConfig *docker.AuthConfiguration
+		AuthConfig *registry.AuthConfig
 		Desc       string
 	}{
 		{
@@ -2529,7 +2530,7 @@
 				Email:      "foo@bar.com",
 				ServerAddr: "www.foobar.com",
 			},
-			AuthConfig: &docker.AuthConfiguration{
+			AuthConfig: &registry.AuthConfig{
 				Username: "foo",
 				Password: "bar",
 				Email:    "foo@bar.com",
@@ -2543,7 +2544,7 @@
 				Password:   "bar",
 				ServerAddr: "www.foobar.com",
 			},
-			AuthConfig: &docker.AuthConfiguration{
+			AuthConfig: &registry.AuthConfig{
 				Username:      "foo",
 				Password:      "bar",
 				ServerAddress: "www.foobar.com",
@@ -2555,8 +2556,8 @@
 	for _, c := range cases {
 		t.Run(c.Desc, func(t *testing.T) {
 			act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
-			require.NoError(t, err)
-			require.Exactly(t, c.AuthConfig, act)
+			must.NoError(t, err)
+			must.Eq(t, c.AuthConfig, act)
 		})
 	}
 }
@@ -2579,7 +2580,7 @@ func TestDockerDriver_OOMKilled(t *testing.T) {
 	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
 	task.Resources.NomadResources.Memory.MemoryMB = 10
 
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -2587,12 +2588,12 @@ func TestDockerDriver_OOMKilled(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 
 	_, _, err := d.StartTask(task)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	defer d.DestroyTask(task.ID, true)
 
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
-	require.NoError(t, err)
+	must.NoError(t, err)
 	select {
 	case res := <-waitCh:
 		if res.Successful() {
@@ -2635,15 +2636,15 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
 	for _, tc := range testCases {
 		task, cfg, _ := dockerTask(t)
 		cfg.Devices = tc.deviceConfig
-		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+		must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 		d := dockerDriverHarness(t, nil)
 		cleanup := d.MkAllocDir(task, true)
 		copyImage(t, task.TaskDir(), "busybox.tar")
 		defer cleanup()
 
 		_, _, err := d.StartTask(task)
-		require.Error(t, err)
-		require.Contains(t, err.Error(), tc.err.Error())
+		must.Error(t, err)
+		must.StrContains(t, err.Error(), tc.err.Error())
 	}
 }
 
@@ -2658,7 +2659,7 @@ func TestDockerDriver_Device_Success(t *testing.T) {
 	cases := []struct {
 		Name     string
 		Input    DockerDevice
-		Expected docker.Device
+		Expected container.DeviceMapping
 	}{
 		{
 			Name: "AllSet",
@@ -2667,7 +2668,7 @@
 				ContainerPath:     "/dev/hostrandom",
 				CgroupPermissions: "rwm",
 			},
-			Expected: docker.Device{
+			Expected: container.DeviceMapping{
 				PathOnHost:        "/dev/random",
 				PathInContainer:   "/dev/hostrandom",
 				CgroupPermissions: "rwm",
@@ -2678,7 +2679,7 @@
 			Input: DockerDevice{
 				HostPath: "/dev/random",
 			},
-			Expected: docker.Device{
+			Expected: container.DeviceMapping{
 				PathOnHost:        "/dev/random",
 				PathInContainer:   "/dev/random",
 				CgroupPermissions: "rwm",
@@ -2692,17 +2693,17 @@
 
 			task, cfg, _ := dockerTask(t)
 			cfg.Devices = []DockerDevice{tc.Input}
-			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+			must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 			client, driver, handle, cleanup := dockerSetup(t, task, nil)
 			defer cleanup()
-			require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
+			must.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
 
-			container, err := client.InspectContainer(handle.containerID)
-			require.NoError(t, err)
+			container, err := client.ContainerInspect(context.Background(), handle.containerID)
+			must.NoError(t, err)
 
-			require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
-			require.Equal(t, tc.Expected, container.HostConfig.Devices[0], "Incorrect device ")
+			must.SliceNotEmpty(t, container.HostConfig.Devices, must.Sprint("Expected one device"))
+			must.Eq(t, tc.Expected, container.HostConfig.Devices[0], must.Sprint("Incorrect device"))
 		})
 	}
 }
 
@@ -2718,18 +2719,18 @@ func TestDockerDriver_Entrypoint(t *testing.T) {
 	cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
 	cfg.Args = []string{}
 
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client, driver, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
 
-	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
 
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
 
-	require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint")
-	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ")
+	must.Len(t, 2, container.Config.Entrypoint, must.Sprint("Expected one entrypoint"))
+	must.Eq(t, entrypoint, container.Config.Entrypoint,
+		must.Sprint("Incorrect entrypoint"))
 }
 
 func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
@@ -2743,32 +2744,32 @@
 	task, cfg, _ := dockerTask(t)
 	cfg.ReadonlyRootfs = true
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client, driver, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
-	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
 
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
 
-	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
+	must.True(t, container.HostConfig.ReadonlyRootfs, must.Sprint("ReadonlyRootfs option not set"))
 }
 
 // fakeDockerClient can be used in places that accept an interface for the
 // docker client such as createContainer.
 type fakeDockerClient struct{}
 
-func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
-	return nil, fmt.Errorf("volume is attached on another node")
+func (fakeDockerClient) ContainerCreate(context.Context, *containerapi.Config, *containerapi.HostConfig, *networkapi.NetworkingConfig, *ocispec.Platform, string) (containerapi.CreateResponse, error) {
+	return containerapi.CreateResponse{}, fmt.Errorf("volume is attached on another node")
 }
-func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
+func (fakeDockerClient) ContainerInspect(context.Context, string) (types.ContainerJSON, error) {
 	panic("not implemented")
 }
-func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
+func (fakeDockerClient) ContainerList(context.Context, containerapi.ListOptions) ([]types.Container, error) {
 	panic("not implemented")
 }
-func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
+func (fakeDockerClient) ContainerRemove(context.Context, string, containerapi.RemoveOptions) error {
 	panic("not implemented")
 }
 
@@ -2783,29 +2784,32 @@ func TestDockerDriver_VolumeError(t *testing.T) {
 	driver := dockerDriverHarness(t, nil)
 
 	// assert volume error is recoverable
-	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
-	require.True(t, structs.IsRecoverable(err))
+	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, createContainerOptions{
+		Config: &containerapi.Config{}}, cfg.Image)
+	must.True(t, structs.IsRecoverable(err))
 }
 
 func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
+	ctx := context.Background()
+
 	expectedPrefix := "2001:db8:1::242:ac11"
 	expectedAdvertise := true
 	task, cfg, _ := dockerTask(t)
 	cfg.AdvertiseIPv6Addr = expectedAdvertise
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client := newTestDockerClient(t)
 
 	// Make sure IPv6 is enabled
-	net, err := client.NetworkInfo("bridge")
+	net, err := client.NetworkInspect(ctx, "bridge", networkapi.InspectOptions{})
 	if err != nil {
 		t.Skip("error retrieving bridge network information, skipping")
 	}
-	if net == nil || !net.EnableIPv6 {
+	if !net.EnableIPv6 {
 		t.Skip("IPv6 not enabled on bridge network, skipping")
 	}
 
@@ -2816,21 +2820,22 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
 	_, network, err := driver.StartTask(task)
 	defer driver.DestroyTask(task.ID, true)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
-	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise)
+	must.Eq(t, expectedAdvertise, network.AutoAdvertise,
+		must.Sprintf("Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise))
 
 	if !strings.HasPrefix(network.IP, expectedPrefix) {
 		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
 	}
 
 	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
-	require.True(t, ok)
+	must.True(t, ok)
 
-	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
+	must.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
 
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(ctx, handle.containerID)
+	must.NoError(t, err)
 
 	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
 		t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
@@ -2853,8 +2858,8 @@ func TestParseDockerImage(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.Image, func(t *testing.T) {
 			repo, tag := parseDockerImage(test.Image)
-			require.Equal(t, test.Repo, repo)
-			require.Equal(t, test.Tag, tag)
+			must.Eq(t, test.Repo, repo)
+			must.Eq(t, test.Tag, tag)
 		})
 	}
 }
@@ -2873,23 +2878,23 @@ func TestDockerImageRef(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.Image, func(t *testing.T) {
 			image := dockerImageRef(test.Repo, test.Tag)
-			require.Equal(t, test.Image, image)
+			must.Eq(t, test.Image, image)
 		})
 	}
 }
 
-func waitForExist(t *testing.T, client *docker.Client, containerID string) {
+func waitForExist(t *testing.T, client *client.Client, containerID string) {
 	tu.WaitForResult(func() (bool, error) {
-		container, err := client.InspectContainer(containerID)
+		container, err := client.ContainerInspect(context.Background(), containerID)
 		if err != nil {
-			if _, ok := err.(*docker.NoSuchContainer); !ok {
+			if !strings.Contains(err.Error(), NoSuchContainerError) {
 				return false, err
 			}
 		}
 
-		return container != nil, nil
+		return container.ID != "", nil
 	}, func(err error) {
-		require.NoError(t, err)
+		must.NoError(t, err)
 	})
 }
 
@@ -2900,9 +2905,11 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
+	ctx := context.Background()
+
 	task, cfg, _ := dockerTask(t)
 
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client := newTestDockerClient(t)
 	driver := dockerDriverHarness(t, nil)
@@ -2912,43 +2919,37 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
 	copyImage(t, task.TaskDir(), "busybox.tar")
 
 	d, ok := driver.Impl().(*Driver)
-	require.True(t, ok)
+	must.True(t, ok)
 
 	_, _, err := d.createImage(task, cfg, client)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
-	require.NoError(t, err)
+	must.NoError(t, err)
 
 	c, err := d.createContainer(client, containerCfg, cfg.Image)
-	require.NoError(t, err)
-	defer client.RemoveContainer(docker.RemoveContainerOptions{
-		ID:    c.ID,
-		Force: true,
-	})
+	must.NoError(t, err)
+	defer client.ContainerRemove(ctx, c.ID, containerapi.RemoveOptions{Force: true})
 
 	// calling createContainer again creates a new one and remove old one
 	c2, err := d.createContainer(client, containerCfg, cfg.Image)
-	require.NoError(t, err)
-	defer client.RemoveContainer(docker.RemoveContainerOptions{
-		ID:    c2.ID,
-		Force: true,
-	})
+	must.NoError(t, err)
+	defer client.ContainerRemove(ctx, c2.ID, containerapi.RemoveOptions{Force: true})
 
-	require.NotEqual(t, c.ID, c2.ID)
+	must.NotEq(t, c.ID, c2.ID)
 	// old container was destroyed
 	{
-		_, err := client.InspectContainer(c.ID)
-		require.Error(t, err)
-		require.Contains(t, err.Error(), NoSuchContainerError)
+		_, err := client.ContainerInspect(ctx, c.ID)
+		must.Error(t, err)
+		must.StrContains(t, err.Error(), NoSuchContainerError)
 	}
 
 	// now start container twice
-	require.NoError(t, d.startContainer(c2))
-	require.NoError(t, d.startContainer(c2))
+	must.NoError(t, d.startContainer(*c2))
+	must.NoError(t, d.startContainer(*c2))
 
 	tu.WaitForResult(func() (bool, error) {
-		c, err := client.InspectContainer(c2.ID)
+		c, err := client.ContainerInspect(ctx, c2.ID)
 		if err != nil {
 			return false, fmt.Errorf("failed to get container status: %v", err)
 		}
@@ -2959,7 +2960,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) {
 
 		return true, nil
 	}, func(err error) {
-		require.NoError(t, err)
+		must.NoError(t, err)
 	})
 }
 
@@ -2980,14 +2981,14 @@ func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
 		"cpu_hard_limit": true,
 	}, spec, nil)
 
-	require.NoError(t, task.EncodeDriverConfig(val))
+	must.NoError(t, task.EncodeDriverConfig(val))
 	cfg := &TaskConfig{}
-	require.NoError(t, task.DecodeDriverConfig(cfg))
+	must.NoError(t, task.DecodeDriverConfig(cfg))
 	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
-	require.NoError(t, err)
+	must.NoError(t, err)
 
-	require.NotZero(t, c.HostConfig.CPUQuota)
-	require.NotZero(t, c.HostConfig.CPUPeriod)
+	must.NonZero(t, c.Host.CPUQuota)
+	must.NonZero(t, c.Host.CPUPeriod)
 }
 
 func TestDockerDriver_memoryLimits(t *testing.T) {
@@ -3040,8 +3041,8 @@ func TestDockerDriver_memoryLimits(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			hard, soft := memoryLimits(c.driverMemoryMB, c.taskResources)
-			require.Equal(t, c.expectedHard, hard)
-			require.Equal(t, c.expectedSoft, soft)
+			must.Eq(t, c.expectedHard, hard)
+			must.Eq(t, c.expectedSoft, soft)
 		})
 	}
 }
@@ -3085,10 +3086,10 @@ func TestDockerDriver_parseSignal(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			s, err := parseSignal(tc.runtime, tc.specifiedSignal)
 			if tc.expectedSignal == "" {
-				require.Error(t, err, "invalid signal")
+				must.Error(t, err, must.Sprint("invalid signal"))
 			} else {
-				require.NoError(t, err)
-				require.Equal(t, s.(syscall.Signal), s)
+				must.NoError(t, err)
+				must.Eq(t, s.(syscall.Signal).String(), s.String())
 			}
 		})
 	}
@@ -3145,7 +3146,7 @@ func TestDockerDriver_StopSignal(t *testing.T) {
 				AllocID:   uuid.Generate(),
 				Resources: basicResources,
 			}
-			require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+			must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 
 			d := dockerDriverHarness(t, nil)
 			cleanup := d.MkAllocDir(task, true)
@@ -3159,18 +3160,13 @@ func TestDockerDriver_StopSignal(t *testing.T) {
 
 			client := newTestDockerClient(t)
 
-			listener := make(chan *docker.APIEvents)
-			err := client.AddEventListener(listener)
-			require.NoError(t, err)
+			ctx, cancel := context.WithCancel(context.Background())
+			listener, _ := client.Events(ctx, events.ListOptions{})
+			defer cancel()
 
-			defer func() {
-				err := client.RemoveEventListener(listener)
-				require.NoError(t, err)
-			}()
-
-			_, _, err = d.StartTask(task)
-			require.NoError(t, err)
-			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+			_, _, err := d.StartTask(task)
+			must.NoError(t, err)
+			must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
 
 			stopErr := make(chan error, 1)
 			go func() {
@@ -3194,10 +3190,10 @@ func TestDockerDriver_StopSignal(t *testing.T) {
 					}
 				}
 			case err := <-stopErr:
-				require.NoError(t, err, "stop task failed")
+				must.NoError(t, err, must.Sprint("stop task failed"))
 			case <-timeout:
 				// timeout waiting for signals
-				require.Equal(t, c.expectedSignals, receivedSignals, "timed out waiting for expected signals")
+				must.Eq(t, c.expectedSignals, receivedSignals, must.Sprint("timed out waiting for expected signals"))
 			}
 		}
 	})
 }
 
@@ -3212,14 +3208,14 @@ func TestDockerDriver_GroupAdd(t *testing.T) {
 	task, cfg, _ := dockerTask(t)
 	cfg.GroupAdd = []string{"12345", "9999"}
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client, d, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
-	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
 
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
 
-	require.Exactly(t, cfg.GroupAdd, container.HostConfig.GroupAdd)
+	must.Eq(t, cfg.GroupAdd, container.HostConfig.GroupAdd)
 }
diff --git a/drivers/docker/driver_unix_test.go b/drivers/docker/driver_unix_test.go
index ab736a28f64b..3b0f656f75e3 100644
--- a/drivers/docker/driver_unix_test.go
+++ b/drivers/docker/driver_unix_test.go
@@ -6,6 +6,7 @@ package docker
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"os"
@@ -17,7 +18,9 @@ import (
 	"testing"
 	"time"
 
-	docker "github.com/fsouza/go-dockerclient"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/api/types/network"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/testutil"
@@ -62,17 +65,19 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
-	require := require.New(t)
+	ctx := context.Background()
 
 	// Because go-dockerclient doesn't provide api for query network aliases, just check that
 	// a container can be created with a 'network_aliases' property
 
 	// Create network, network-scoped alias is supported only for containers in user defined networks
 	client := newTestDockerClient(t)
-	networkOpts := docker.CreateNetworkOptions{Name: "foobar", Driver: "bridge"}
-	network, err := client.CreateNetwork(networkOpts)
-	require.NoError(err)
-	defer client.RemoveNetwork(network.ID)
+	networkResponse, err := client.NetworkCreate(ctx, "foobar", network.CreateOptions{Driver: "bridge"})
+	must.NoError(t, err)
+	defer client.NetworkRemove(ctx, networkResponse.ID)
+
+	network, err := client.NetworkInspect(ctx, networkResponse.ID, network.InspectOptions{})
+	must.NoError(t, err)
 
 	expected := []string{"foobar"}
 	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
@@ -83,7 +88,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
 		Name:      "busybox",
 		Resources: basicResources,
 	}
-	require.NoError(task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -91,19 +96,19 @@
 	copyImage(t, task.TaskDir(), "busybox.tar")
 
 	_, _, err = d.StartTask(task)
-	require.NoError(err)
-	require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, err)
+	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
 
 	defer d.DestroyTask(task.ID, true)
 
 	dockerDriver, ok := d.Impl().(*Driver)
-	require.True(ok)
+	must.True(t, ok)
 
 	handle, ok := dockerDriver.tasks.Get(task.ID)
-	require.True(ok)
+	must.True(t, ok)
 
-	_, err = client.InspectContainer(handle.containerID)
-	require.NoError(err)
+	_, err = client.ContainerInspect(ctx, handle.containerID)
+	must.NoError(t, err)
 }
 
 func TestDockerDriver_NetworkMode_Host(t *testing.T) {
@@ -119,7 +124,7 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) {
 		Name:      "busybox-demo",
 		Resources: basicResources,
 	}
-	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
 
 	d := dockerDriverHarness(t, nil)
 	cleanup := d.MkAllocDir(task, true)
@@ -141,10 +146,10 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) {
 
 	client := newTestDockerClient(t)
 
-	container, err := client.InspectContainer(handle.containerID)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
 	must.NoError(t, err)
 
-	actual := container.HostConfig.NetworkMode
+	actual := string(container.HostConfig.NetworkMode)
 	must.Eq(t, expected, actual)
 }
 
@@ -156,17 +161,17 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
 	cfg.CPUHardLimit = true
 	cfg.CPUCFSPeriod = 1000000
 
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client, _, handle, cleanup := dockerSetup(t, task, nil)
 	defer cleanup()
 
 	waitForExist(t, client, handle.containerID)
 
-	container, err := client.InspectContainer(handle.containerID)
-	require.NoError(t, err)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
+	must.NoError(t, err)
 
-	require.Equal(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
+	must.Eq(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
 }
 
 func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
@@ -189,7 +194,7 @@ func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
 	defer cleanup()
 	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
 
-	container, err := client.InspectContainer(handle.containerID)
+	container, err := client.ContainerInspect(context.Background(), handle.containerID)
 	assert.Nil(t, err, "unexpected error: %v", err)
 
 	want := "16384"
@@ -349,13 +354,13 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
 			task.AllocDir = allocDir
 			task.Name = "demo"
 
-			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+			must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
-			require.NoError(t, err)
+			must.NoError(t, err)
 
 			for _, v := range c.expectedVolumes {
-				require.Contains(t, cc.HostConfig.Binds, v)
+				must.SliceContains(t, cc.Host.Binds, v)
 			}
 		})
 	}
@@ -375,16 +380,16 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
 			task.AllocDir = allocDir
 			task.Name = "demo"
 
-			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+			must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
 			if c.requiresVolumes {
-				require.Error(t, err, "volumes are not enabled")
+				must.Error(t, err, must.Sprint("volumes are not enabled"))
 			} else {
-				require.NoError(t, err)
+				must.NoError(t, err)
 
 				for _, v := range c.expectedVolumes {
-					require.Contains(t, cc.HostConfig.Binds, v)
+					must.SliceContains(t, cc.Host.Binds, v)
 				}
 			}
 		})
 	}
@@ -405,7 +410,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 		name            string
 		requiresVolumes bool
 		passedMounts    []DockerMount
-		expectedMounts  []docker.HostMount
+		expectedMounts  []mount.Mount
 	}{
 		{
 			name: "basic volume",
@@ -417,13 +422,13 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 					Source:   "test",
 				},
 			},
-			expectedMounts: []docker.HostMount{
+			expectedMounts: []mount.Mount{
 				{
 					Type:          "volume",
 					Target:        "/nomad",
 					Source:        "test",
 					ReadOnly:      true,
-					VolumeOptions: &docker.VolumeOptions{},
+					VolumeOptions: &mount.VolumeOptions{},
 				},
 			},
 		},
@@ -436,12 +441,12 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 					Source: "test",
 				},
 			},
-			expectedMounts: []docker.HostMount{
+			expectedMounts: []mount.Mount{
 				{
 					Type:        "bind",
 					Target:      "/nomad",
 					Source:      "/tmp/nomad/alloc-dir/demo/test",
-					BindOptions: &docker.BindOptions{},
+					BindOptions: &mount.BindOptions{},
 				},
 			},
 		},
@@ -455,12 +460,12 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 					Source: "/tmp/test",
 				},
 			},
-			expectedMounts: []docker.HostMount{
+			expectedMounts: []mount.Mount{
 				{
 					Type:        "bind",
 					Target:      "/nomad",
 					Source:      "/tmp/test",
-					BindOptions: &docker.BindOptions{},
+					BindOptions: &mount.BindOptions{},
 				},
 			},
 		},
@@ -474,12 +479,12 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 					Source: "../../test",
 				},
 			},
-			expectedMounts: []docker.HostMount{
+			expectedMounts: []mount.Mount{
 				{
 					Type:        "bind",
 					Target:      "/nomad",
 					Source:      "/tmp/nomad/test",
-					BindOptions: &docker.BindOptions{},
+					BindOptions: &mount.BindOptions{},
 				},
 			},
 		},
@@ -496,11 +501,11 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 				},
 			},
 		},
-			expectedMounts: []docker.HostMount{
+			expectedMounts: []mount.Mount{
 				{
 					Type:   "tmpfs",
 					Target: "/nomad",
-					TempfsOptions: &docker.TempfsOptions{
+					TmpfsOptions: &mount.TmpfsOptions{
 						SizeBytes: 321,
 						Mode:      0666,
 					},
@@ -522,11 +527,11 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 				task.AllocDir = allocDir
 				task.Name = "demo"
 
-				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+				must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
-				require.NoError(t, err)
-				require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
+				must.NoError(t, err)
+				must.Eq(t, c.expectedMounts, cc.Host.Mounts)
 			})
 		}
 	})
@@ -545,14 +550,14 @@ func TestDockerDriver_MountsSerialization(t *testing.T) {
 				task.AllocDir = allocDir
 				task.Name = "demo"
 
-				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+				must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
 				if c.requiresVolumes {
-					require.Error(t, err, "volumes are not enabled")
+					must.Error(t, err, must.Sprint("volumes are not enabled"))
 				} else {
-					require.NoError(t, err)
-					require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
+					must.NoError(t, err)
+					must.Eq(t, c.expectedMounts, cc.Host.Mounts)
 				}
 			})
 		}
@@ -608,13 +613,13 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
 
 	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
 	require.NoError(t, err)
 
-	expectedMounts := []docker.HostMount{
+	expectedMounts := []mount.Mount{
 		{
 			Type:     "bind",
 			Source:   "/tmp/cfg-mount",
 			Target:   "/container/tmp/cfg-mount",
 			ReadOnly: false,
-			BindOptions: &docker.BindOptions{
+			BindOptions: &mount.BindOptions{
 				Propagation: "",
 			},
 		},
@@ -623,24 +628,24 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
 			Source:   "/tmp/task-mount",
 			Target:   "/container/tmp/task-mount",
 			ReadOnly: true,
-			BindOptions: &docker.BindOptions{
+			BindOptions: &mount.BindOptions{
 				Propagation: "rprivate",
 			},
 		},
 	}
 
 	if runtime.GOOS != "linux" {
-		expectedMounts[0].BindOptions = &docker.BindOptions{}
-		expectedMounts[1].BindOptions = &docker.BindOptions{}
+		expectedMounts[0].BindOptions = &mount.BindOptions{}
+		expectedMounts[1].BindOptions = &mount.BindOptions{}
 	}
 
-	foundMounts := c.HostConfig.Mounts
+	foundMounts := c.Host.Mounts
 	sort.Slice(foundMounts, func(i, j int) bool {
 		return foundMounts[i].Target < foundMounts[j].Target
 	})
-	require.EqualValues(t, expectedMounts, foundMounts)
+	must.Eq(t, expectedMounts, foundMounts)
 
-	expectedDevices := []docker.Device{
+	expectedDevices := []container.DeviceMapping{
 		{
 			PathOnHost:      "/dev/stdout",
 			PathInContainer: "/container/dev/cfg-stdout",
@@ -653,11 +658,11 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
 		},
 	}
 
-	foundDevices := c.HostConfig.Devices
+	foundDevices := c.Host.Devices
 	sort.Slice(foundDevices, func(i, j int) bool {
 		return foundDevices[i].PathInContainer < foundDevices[j].PathInContainer
 	})
-	require.EqualValues(t, expectedDevices, foundDevices)
+	must.Eq(t, expectedDevices, foundDevices)
 }
 
 // TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
@@ -676,7 +681,7 @@ func TestDockerDriver_Cleanup(t *testing.T) {
 		Resources: basicResources,
 	}
 
-	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
+	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
 
 	client, driver, handle, cleanup := dockerSetup(t, task, map[string]interface{}{
 		"gc": map[string]interface{}{
@@ -686,24 +691,24 @@ func TestDockerDriver_Cleanup(t *testing.T) {
 	})
 	defer cleanup()
 
-	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
+	must.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
 
 	// Cleanup
-	require.NoError(t, driver.DestroyTask(task.ID, true))
+	must.NoError(t, driver.DestroyTask(task.ID, true))
 
 	// Ensure image was removed
 	tu.WaitForResult(func() (bool, error) {
-		if _, err := client.InspectImage(cfg.Image); err == nil {
+		if _, _, err := client.ImageInspectWithRaw(context.Background(), cfg.Image); err == nil {
 			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
 		}
 
 		return true, nil
 	}, func(err error) {
-		require.NoError(t, err)
+		must.NoError(t, err)
 	})
 
 	// The image doesn't exist which shouldn't be an error when calling
 	// Cleanup, so call it again to make sure.
-	require.NoError(t, driver.Impl().(*Driver).cleanupImage(handle))
+	must.NoError(t, driver.Impl().(*Driver).cleanupImage(handle))
 }
 
 // Tests that images prefixed with "https://" are supported