Docker notes (18): How to create a new image

(1) Create an image from a currently running container:

# docker commit 281e7d886185 <docker image name>

(2) Save the image and compress it:

# docker save <docker image name> | gzip > <docker image name>.tar.gz

(3) Load the image:

# zcat <docker image name>.tar.gz | docker load
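
Putting the three steps together, a minimal end-to-end sketch of moving an image to another machine (the image name and target host below are hypothetical):

# docker commit 281e7d886185 myapp:snapshot
# docker save myapp:snapshot | gzip > myapp-snapshot.tar.gz
# scp myapp-snapshot.tar.gz otherhost:/tmp/
# ssh otherhost 'zcat /tmp/myapp-snapshot.tar.gz | docker load'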

References:
HOW TO CREATE A DOCKER IMAGE FROM A CONTAINER
How to copy Docker images from one host to another without using a repository

Docker notes (17): Adding labels to images, containers, and the Docker daemon

You can store metadata such as the license, vendor, and so on by attaching labels (in key=value format) to images, containers, and the Docker daemon:

(1) To label an image, use the LABEL instruction in the Dockerfile (try to put all labels into a single LABEL instruction, because each LABEL instruction adds a layer to the image):

LABEL [<namespace>.]<key>=<value> ...
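For example, several labels combined into one LABEL instruction (the keys and values are only illustrative):

LABEL com.example.vendor="ACME" \
      com.example.license="GPLv2" \
      com.example.release-date="2016-08-01"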

(2) To label a container:

docker run \
   -d \
   --label com.example.group="webservers" \
   --label com.example.environment="production" \
   busybox \
   top
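
These labels can later be used for filtering, e.g. to list only the containers labeled as webservers above:

# docker ps --filter "label=com.example.group=webservers"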

(3) To label the Docker daemon:

docker daemon \
  --dns 8.8.8.8 \
  --dns 8.8.4.4 \
  -H unix:///var/run/docker.sock \
  --label com.example.environment="production" \
  --label com.example.storage="ssd"
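
The daemon labels set this way should then show up in the docker info output, roughly like the following (output abridged):

# docker info
...
Labels:
 com.example.environment=production
 com.example.storage=ssd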

References:
Apply custom metadata

Docker notes (16): Assigning CPU resources to a container

The --cpuset-cpus option of the docker run command pins a container to specific CPU cores. For example:

# docker run -ti --rm --cpuset-cpus=1,6 redis

There is also a --cpu-shares option, which is a relative weight with a default value of 1024. That is, if two running containers both have a --cpu-shares value of 1024, they get an equal share of CPU time when the CPU is contended.
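
For example, when the CPU is fully loaded, the first container below should get roughly twice as much CPU time as the second:

# docker run -d --cpu-shares=1024 busybox top
# docker run -d --cpu-shares=512 busybox top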

Docker notes (15): docker swarm code analysis (2)

The handler functions that the Docker daemon registers to serve the docker client's swarm-related commands are located in api/server/router/swarm/cluster.go:

// buildRouter is a router to talk with the build controller
type swarmRouter struct {
    backend Backend
    routes  []router.Route
}

// NewRouter initializes a new build router
func NewRouter(b Backend) router.Router {
    r := &swarmRouter{
        backend: b,
    }
    r.initRoutes()
    return r
}

// Routes returns the available routers to the swarm controller
func (sr *swarmRouter) Routes() []router.Route {
    return sr.routes
}

func (sr *swarmRouter) initRoutes() {
    sr.routes = []router.Route{
        router.NewPostRoute("/swarm/init", sr.initCluster),
        router.NewPostRoute("/swarm/join", sr.joinCluster),
        router.NewPostRoute("/swarm/leave", sr.leaveCluster),
        router.NewGetRoute("/swarm", sr.inspectCluster),
        router.NewPostRoute("/swarm/update", sr.updateCluster),
        router.NewGetRoute("/services", sr.getServices),
        router.NewGetRoute("/services/{id:.*}", sr.getService),
        router.NewPostRoute("/services/create", sr.createService),
        router.NewPostRoute("/services/{id:.*}/update", sr.updateService),
        router.NewDeleteRoute("/services/{id:.*}", sr.removeService),
        router.NewGetRoute("/nodes", sr.getNodes),
        router.NewGetRoute("/nodes/{id:.*}", sr.getNode),
        router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode),
        router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode),
        router.NewGetRoute("/tasks", sr.getTasks),
        router.NewGetRoute("/tasks/{id:.*}", sr.getTask),
    }
}

Taking the handling of the "/swarm/init" request as an example, the actual handler is located in daemon/cluster/cluster.go:

// Init initializes new cluster from user provided request.
func (c *Cluster) Init(req types.InitRequest) (string, error) {
    c.Lock()
    if node := c.node; node != nil {
        if !req.ForceNewCluster {
            c.Unlock()
            return "", ErrSwarmExists
        }
        if err := c.stopNode(); err != nil {
            c.Unlock()
            return "", err
        }
    }

    if err := validateAndSanitizeInitRequest(&req); err != nil {
        c.Unlock()
        return "", err
    }

    listenHost, listenPort, err := resolveListenAddr(req.ListenAddr)
    if err != nil {
        c.Unlock()
        return "", err
    }

    advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort)
    if err != nil {
        c.Unlock()
        return "", err
    }

    localAddr := listenHost

    // If the advertise address is not one of the system's
    // addresses, we also require a listen address.
    listenAddrIP := net.ParseIP(listenHost)
    if listenAddrIP != nil && listenAddrIP.IsUnspecified() {
        advertiseIP := net.ParseIP(advertiseHost)
        if advertiseIP == nil {
            // not an IP
            c.Unlock()
            return "", errMustSpecifyListenAddr
        }

        systemIPs := listSystemIPs()

        found := false
        for _, systemIP := range systemIPs {
            if systemIP.Equal(advertiseIP) {
                found = true
                break
            }
        }
        if !found {
            c.Unlock()
            return "", errMustSpecifyListenAddr
        }
        localAddr = advertiseIP.String()
    }

    // todo: check current state existing
    n, err := c.startNewNode(req.ForceNewCluster, localAddr, "", net.JoinHostPort(listenHost, listenPort), net.JoinHostPort(advertiseHost, advertisePort), "", "")
    if err != nil {
        c.Unlock()
        return "", err
    }
    c.Unlock()

    select {
    case <-n.Ready():
        if err := initClusterSpec(n, req.Spec); err != nil {
            return "", err
        }
        go c.reconnectOnFailure(n)
        return n.NodeID(), nil
    case <-n.done:
        c.RLock()
        defer c.RUnlock()
        if !req.ForceNewCluster { // if failure on first attempt don't keep state
            if err := c.clearState(); err != nil {
                return "", err
            }
        }
        return "", c.err
    }
}

Its core is the c.startNewNode function, whose main logic is as follows:

......
n, err := swarmagent.NewNode(&swarmagent.NodeConfig{
    Hostname:           c.config.Name,
    ForceNewCluster:    forceNewCluster,
    ListenControlAPI:   filepath.Join(c.root, controlSocket),
    ListenRemoteAPI:    listenAddr,
    AdvertiseRemoteAPI: advertiseAddr,
    JoinAddr:           joinAddr,
    StateDir:           c.root,
    JoinToken:          joinToken,
    Executor:           container.NewExecutor(c.config.Backend),
    HeartbeatTick:      1,
    ElectionTick:       3,
})
if err != nil {
    return nil, err
}
ctx := context.Background()
if err := n.Start(ctx); err != nil {
    return nil, err
}
......

In other words, it calls swarmkit's NewNode to create a node and then starts it; this node takes on the manager role.
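
Since these handlers are plain HTTP routes on the daemon, the endpoint can also be exercised directly; a rough sketch against the daemon's unix socket (this assumes curl with --unix-socket support and API version 1.24, which corresponds to docker 1.12):

$ curl --unix-socket /var/run/docker.sock \
    -H "Content-Type: application/json" \
    -X POST http://localhost/v1.24/swarm/init \
    -d '{"ListenAddr": "0.0.0.0:2377"}'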

Docker notes (14): docker swarm code analysis (1)

Docker 1.12 integrates the docker swarm functionality; the related client code lives under the api/client/swarm folder. Take the code of the docker swarm init command (api/client/swarm/init.go) as an example:

const (
    generatedSecretEntropyBytes = 16
    generatedSecretBase         = 36
    // floor(log(2^128-1, 36)) + 1
    maxGeneratedSecretLength = 25
)

type initOptions struct {
    swarmOptions
    listenAddr NodeAddrOption
    // Not a NodeAddrOption because it has no default port.
    advertiseAddr   string
    forceNewCluster bool
}

func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
    opts := initOptions{
        listenAddr: NewListenAddrOption(),
    }

    cmd := &cobra.Command{
        Use:   "init [OPTIONS]",
        Short: "Initialize a swarm",
        Args:  cli.NoArgs,
        RunE: func(cmd *cobra.Command, args []string) error {
            return runInit(dockerCli, cmd.Flags(), opts)
        },
    }

    flags := cmd.Flags()
    flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
    flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
    flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.")
    addSwarmFlags(flags, &opts.swarmOptions)
    return cmd
}

func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    req := swarm.InitRequest{
        ListenAddr:      opts.listenAddr.String(),
        AdvertiseAddr:   opts.advertiseAddr,
        ForceNewCluster: opts.forceNewCluster,
        Spec:            opts.swarmOptions.ToSpec(),
    }

    nodeID, err := client.SwarmInit(ctx, req)
    if err != nil {
        if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") {
            return errors.New(err.Error() + " - specify one with --advertise-addr")
        }
        return err
    }

    fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID)

    if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil {
        return err
    }

    fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n")
    return nil
}

Here client.DockerCli represents the docker command line client:

// DockerCli represents the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
    // initializing closure
    init func() error

    // configFile has the client configuration file
    configFile *configfile.ConfigFile
    // in holds the input stream and closer (io.ReadCloser) for the client.
    in io.ReadCloser
    // out holds the output stream (io.Writer) for the client.
    out io.Writer
    // err holds the error stream (io.Writer) for the client.
    err io.Writer
    // keyFile holds the key file as a string.
    keyFile string
    // inFd holds the file descriptor of the client's STDIN (if valid).
    inFd uintptr
    // outFd holds file descriptor of the client's STDOUT (if valid).
    outFd uintptr
    // isTerminalIn indicates whether the client's STDIN is a TTY
    isTerminalIn bool
    // isTerminalOut indicates whether the client's STDOUT is a TTY
    isTerminalOut bool
    // client is the http client that performs all API operations
    client client.APIClient
    // state holds the terminal input state
    inState *term.State
    // outState holds the terminal output state
    outState *term.State
}

Its client field is the engine-api client, so the client.SwarmInit code above is located in engine-api/client/swarm_init.go:

// SwarmInit initializes the Swarm.
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
    serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
    if err != nil {
        return "", err
    }

    var response string
    err = json.NewDecoder(serverResp.body).Decode(&response)
    ensureReaderClosed(serverResp)
    return response, err
}
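
The error-rewriting branch in runInit above is what produces the hint to pass --advertise-addr when the daemon cannot pick an address by itself (typically on a multi-homed host); in that case you re-run init with an explicit address (the address below is a placeholder):

$ docker swarm init --advertise-addr 192.168.1.10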

Docker notes (13): A brief introduction to nvidia-docker

Because GPUs are vendor-specific products that need vendor-specific drivers, Docker itself does not support GPUs. Previously, to use a GPU inside Docker you had to install the same GPU driver used on the host inside the container and map the host's GPU devices (e.g. /dev/nvidia0) into the container, so such Docker images were not portable.

The nvidia-docker project was created to solve this problem: the Docker image does not need to know anything about the underlying GPU; instead, the required devices and driver files are mounted into the container when it is started.

Build and install nvidia-docker from source (if you need to set a proxy, refer to this issue):

# go get -d github.com/NVIDIA/nvidia-docker
# cd $GOPATH/src/github.com/NVIDIA/nvidia-docker
# make install
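
Once installed, the usual smoke test is to run nvidia-smi inside Nvidia's CUDA image through the wrapper (this assumes an NVIDIA driver is installed on the host):

# nvidia-docker run --rm nvidia/cuda nvidia-smi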

Looking at nvidia-docker's main function:

func main() {
    args := os.Args[1:]
    defer exit()

    assert(LoadEnvironment())

    command, off, err := docker.ParseArgs(args)
    assert(err)

    if command != "create" && command != "run" {
        if command == "version" {
            fmt.Printf("NVIDIA Docker: %s\n\n", Version)
        }
        assert(docker.Docker(args...))
    }

    opt, i, err := docker.ParseArgs(args[off+1:], command)
    assert(err)
    off += i + 1

    if (command == "create" || command == "run") && opt != "" {
        vols, err := VolumesNeeded(opt)
        assert(err)

        if vols != nil {
            var nargs []string
            var err error

            if Host != nil {
                nargs, err = GenerateRemoteArgs(opt, vols)
            } else {
                assert(nvidia.LoadUVM())
                assert(nvidia.Init())
                nargs, err = GenerateLocalArgs(opt, vols)
                nvidia.Shutdown()
            }
            assert(err)
            args = append(args[:off], append(nargs, args[off:]...)...)
        }
    }

    assert(docker.Docker(args...))
}

Apart from the create and run commands, all other commands are still handled by the local docker.

In addition, nvidia-docker also offers a plug-in mode (see Internals):

$ curl -s http://localhost:3476/docker/cli
--device=/dev/nvidiactl --device=/dev/nvidia-uvm --device=/dev/nvidia3 --device=/dev/nvidia2 --device=/dev/nvidia1 --device=/dev/nvidia0 --volume-driver=nvidia-docker --volume=nvidia_driver_361.48:/usr/local/nvidia:ro
$ docker run -ti --rm `curl -s http://localhost:3476/docker/cli` nvidia/cuda nvidia-smi

With this approach you do not need nvidia-docker at all and can use docker directly; however, it does not check whether the image is compatible with the nvidia driver.

Yet another option is to use the Go package provided by Nvidia.

References:
Why NVIDIA Docker

Docker notes (12): docker 1.12 integrates the docker swarm functionality

Docker 1.12 integrates the docker swarm functionality. According to the article Docker Swarm Is Dead. Long Live Docker Swarm., docker 1.12 has the following advantages over the standalone docker swarm:
(1)

With swarm mode you create a swarm with the ‘init’ command, and add workers to the cluster with the ‘join’ command. The commands to create and join a swarm literally take a second or two to complete. Mouat said “Comparing getting a Kubernetes or Mesos cluster running, Docker Swarm is a snap”.

Communication between nodes on the swarm is all secured with Transport Layer Security (TLS). For simple setups, Docker 1.12 generates self-signed certificates to use when you create the swarm, or you can provide certificates from your own certificate authority. Those certificates are only used internally by the nodes; any services you publicly expose use your own certs as usual.

The swarm mode implemented in docker 1.12 is much simpler to set up, and the nodes communicate with each other over TLS.

(2)

The self-awareness of the swarm is the biggest and most significant change. Every node in the swarm can reach every other node, and is able to route traffic where it needs to go. You no longer need to run your own load balancer and integrate it with a dynamic discovery agent, using tools like Nginx and Interlock.

Now if a node receives a request which it can’t fulfil, because it isn’t running an instance of the container that can process the request, it routes the request on to a node which can fulfil it. This is transparent to the consumer, all they see is the response to their request, they don’t know about any redirections that happened within the swarm.

docker 1.12swarm模式自带“self-awareness”和“load-balance”机制,并且可以把请求路由到符合要求的node

docker 1.12swarm模式相关的文件默认存放在/var/lib/docker/swarm这个文件夹下面。

For a demo of swarm mode in docker 1.12, see this video.
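
Bootstrapping a small swarm really does boil down to two commands; a minimal sketch (the addresses are placeholders, and the worker join token is the one printed by docker swarm init or docker swarm join-token worker):

manager# docker swarm init --advertise-addr 192.168.1.10
worker# docker swarm join --token <worker-join-token> 192.168.1.10:2377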

Update: docker 1.12 actually uses the swarmkit project to implement the docker swarm cluster functionality (the related code is in the daemon/cluster directory).

References:
The relation between “docker/swarm” and “docker/swarmkit”
Comparing Swarm, Swarmkit and Swarm Mode
Docker 1.12 Swarm Mode – Under the hood

Docker notes (11): Some useful cleanup commands

The following commands are taken from this article.

(1) Remove containers that have already exited:

docker rm -v $(docker ps --filter status=exited -q)

(2) Remove volumes that are no longer in use (dangling):

docker volume rm $(docker volume ls -q -f 'dangling=true')

(3) Remove dangling images:

docker rmi $(docker images -f "dangling=true" -q) 

(4) Remove all containers (both running and exited):

docker rm -f $(docker ps -a | awk 'NR > 1 {print $1}')
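
Note that the commands above fail when the command substitution comes back empty (docker rm/docker rmi with no arguments is an error); a small, hypothetical wrapper script that guards against that:

#!/bin/bash
# Run each cleanup only when there is actually something to remove,
# so that an empty list does not make docker rm/volume rm/rmi fail.

exited=$(docker ps --filter status=exited -q)
[ -n "$exited" ] && docker rm -v $exited

dangling_volumes=$(docker volume ls -q -f dangling=true)
[ -n "$dangling_volumes" ] && docker volume rm $dangling_volumes

dangling_images=$(docker images -f dangling=true -q)
[ -n "$dangling_images" ] && docker rmi $dangling_images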

Docker notes (10): "EXPOSE", "--expose=[]", "-P" and "-p=[]"

The article A Brief Primer on Docker Networking Rules: EXPOSE, -p, -P, --link explains "EXPOSE", "--expose=[]", "-P" and "-p=[]" in detail.

"EXPOSE" and "--expose=[]" expose ports of the container but do not map them to ports on the host, so those ports can only be reached on that host (for example from other containers). "-P" maps every exposed port to a port on the host, while "-p=[]" lets you specify the container-to-host port mapping yourself.
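
A few examples of the difference (the image name is a placeholder; assume its Dockerfile EXPOSEs port 80):

# docker run -d --expose=80 myimage    # port 80 is exposed but not published on the host
# docker run -d -P myimage             # all EXPOSEd ports are published on random high host ports
# docker run -d -p 8080:80 myimage     # host port 8080 is explicitly mapped to container port 80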

Other references:
Why does a Docker container running a server expose port to the outside world even though said port is blocked by iptables?
Exposing a container port on host

Docker notes (9): Managing docker with systemd

Many Linux distributions, including RHEL, manage docker through systemd. For example, starting and stopping the docker daemon:

# systemctl start docker
# systemctl stop docker

You can also use systemctl status docker to check the current running state of the docker daemon:

# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/lib/systemd/system/docker.service; enabled)
  Drop-In: /etc/systemd/system/docker.service.d
           └─http-proxy.conf
   Active: active (running) since Mon 2016-03-28 23:04:48 EDT; 21min ago
     Docs: https://docs.docker.com
 Main PID: 64991 (docker)
   CGroup: /system.slice/docker.service
           └─64991 /usr/bin/docker daemon -D -H fd://

Mar 28 23:08:51 lxc-dl980-g7-1-hLinux docker[64991]: time="2016-03-28T23:08:51.685679667-04:00" level=debug msg="devmapper: Delete...START"
Mar 28 23:08:51 lxc-dl980-g7-1-hLinux docker[64991]: time="2016-03-28T23:08:51.688765554-04:00" level=debug msg="devmapper: issueD...START"
Mar 28 23:08:51 lxc-dl980-g7-1-hLinux docker[64991]: time="2016-03-28T23:08:51.689292872-04:00" level=debug msg="devmapper: activa...c9f6)"
Mar 28 23:08:53 lxc-dl980-g7-1-hLinux docker[64991]: time="2016-03-28T23:08:53.050572512-04:00" level=debug msg="devmapper: issueD.... END"
.....

The unit file is /lib/systemd/system/docker.service; after modifying it, remember to reload systemd with the systemctl daemon-reload command.
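
Rather than editing the shipped unit file, local changes are usually put in a drop-in under /etc/systemd/system/docker.service.d/ (the http-proxy.conf shown in the status output above is such a drop-in). A sketch with a hypothetical proxy address:

/etc/systemd/system/docker.service.d/http-proxy.conf:

[Service]
Environment="HTTP_PROXY=http://proxy.example.com:3128"

# systemctl daemon-reload
# systemctl restart docker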

The systemctl show docker command shows docker's various configuration properties:

~# systemctl show docker
Type=notify
Restart=no
NotifyAccess=main
RestartUSec=100ms
TimeoutStartUSec=0
TimeoutStopUSec=1min 30s
WatchdogUSec=0
WatchdogTimestampMonotonic=0
StartLimitInterval=10000000
StartLimitBurst=5
StartLimitAction=none
FailureAction=none
PermissionsStartOnly=no
......

For how to view logs with journalctl, see https://www.loggly.com/ultimate-guide/using-journalctl/.
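
For the docker unit specifically, the most common invocations look like this:

# journalctl -u docker.service --since today
# journalctl -u docker.service -f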

On binding a service to specific CPU cores, see https://www.golinuxhub.com/2018/02/how-to-assign-service-to-specific-core/.
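
The mechanism behind that article is systemd's CPUAffinity= directive; a sketch of pinning the docker service to cores 0 and 1 via a drop-in (reload systemd and restart the service afterwards):

/etc/systemd/system/docker.service.d/cpuaffinity.conf:

[Service]
CPUAffinity=0 1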

References:
Control and configure Docker with systemd