70.clientInterface

PR #1937

Rework client.Interface

PR #1946

There have been a few folks who have asked about machine constraints for scheduling. Let's use this issue as a place to gather ideas and requirements.

Add requirements based scheduling.


// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
type Pod struct {
    TypeMeta   `json:",inline" yaml:",inline"`
    ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`

    DesiredState PodState `json:"desiredState,omitempty" yaml:"desiredState,omitempty"`
    CurrentState PodState `json:"currentState,omitempty" yaml:"currentState,omitempty"`
    // NodeSelector is a selector which must be true for the pod to fit on a node
    NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
}


func NewSelectorMatchPredicate(info NodeInfo) FitPredicate {
    selector := &NodeSelector{
        info: info,
    }
    return selector.PodSelectorMatches
}

type NodeSelector struct {
    info NodeInfo
}

func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
    if len(pod.NodeSelector) == 0 {
        return true, nil
    }
    selector := labels.SelectorFromSet(pod.NodeSelector)
    minion, err := n.info.GetNodeInfo(node)
    if err != nil {
        return false, err
    }
    return selector.Matches(labels.Set(minion.Labels)), nil
}
  • This code implements the logic for scheduling a Pod onto a node in Kubernetes; its core is the NodeSelector predicate together with the Pod's NodeSelector field.
  1. The Pod struct defines a Pod object, including metadata (name, namespace, labels, and so on), the desired state, the current state, and NodeSelector. NodeSelector is a map of key/value pairs describing which nodes the Pod may be scheduled onto.
  2. NewSelectorMatchPredicate returns a FitPredicate, a function used to decide whether a Pod fits on a given node; the predicate returned here is the NodeSelector's PodSelectorMatches method.
  3. NodeSelector is a struct holding a NodeInfo, which it uses to look up information about a node.
  4. PodSelectorMatches takes a Pod, the list of Pods already on the node, and the node's name. If the Pod's NodeSelector is empty, the Pod can run on any node, so it returns true. Otherwise it builds a label selector from the Pod's NodeSelector, fetches the node's information, and returns whether the node's labels satisfy that selector; a standalone sketch of these matching semantics follows this list.
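
The matching rule is simple enough to show on its own. Below is a minimal sketch of the semantics PodSelectorMatches implements, using plain maps instead of the real labels package; selectorMatches and the example labels are illustrative only, not part of the PR.

package main

import "fmt"

// selectorMatches mimics the check done by PodSelectorMatches: every key/value
// pair in the pod's nodeSelector must also appear in the node's labels.
func selectorMatches(nodeSelector, nodeLabels map[string]string) bool {
    for k, v := range nodeSelector {
        if nodeLabels[k] != v {
            return false
        }
    }
    // An empty (or nil) nodeSelector places no constraint, so it matches any node.
    return true
}

func main() {
    podSelector := map[string]string{"disktype": "ssd"} // hypothetical pod nodeSelector

    fmt.Println(selectorMatches(podSelector, map[string]string{"disktype": "ssd", "zone": "a"})) // true
    fmt.Println(selectorMatches(podSelector, map[string]string{"disktype": "hdd"}))              // false
    fmt.Println(selectorMatches(nil, map[string]string{"disktype": "hdd"}))                      // true
}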

PR #1716

docker: add cluster bootstrap and doc

Getting started locally with docker

This method runs a local kubernetes cluster, self-hosted in Docker itself. The Kubelet is started in a container with access to the Docker API. It then launches a pod of containers that comprise the rest of the local-only kubernetes cluster.

Pre-requisites
With boot2docker
boot2docker up
$(boot2docker shellinit)
export DOCKER_HOST_IP=$(boot2docker ip 2>/dev/null)
export KUBERNETES_MASTER=$DOCKER_HOST_IP:8080
With local docker daemon
export DOCKER_HOST_IP=127.0.0.1
export KUBERNETES_MASTER=$DOCKER_HOST_IP:8080
Build the kubernetes docker images
./build/make-run-images.sh 
Bootstrap the cluster
docker run -v /var/run/docker.sock:/var/run/docker.sock kubernetes-bootstrap
Build the kubernetes client
./build/make-client.sh
# set $host_os and $host_arch to your local host os and architecture.
export PATH=$(readlink -f _output/build/$host_os/$host_arch):$PATH
Manage your pods
kubecfg list /pods
kubecfg -p 8181:80 run nginx 1 kube-nginx
kubecfg list /pods
curl $DOCKER_HOST_IP:8181
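
If kubecfg and curl respond as expected, the cluster is up. As a quick programmatic check, here is a minimal Go sketch that reads the KUBERNETES_MASTER address exported above and issues an HTTP GET against it; any response at all, even an error status, shows the apiserver container is reachable. The "/" path is arbitrary here, not a documented endpoint.

package main

import (
    "fmt"
    "log"
    "net/http"
    "os"
)

func main() {
    // KUBERNETES_MASTER was exported earlier, e.g. 127.0.0.1:8080.
    master := os.Getenv("KUBERNETES_MASTER")
    if master == "" {
        log.Fatal("KUBERNETES_MASTER is not set")
    }
    // The local bootstrap serves plain HTTP, so no TLS configuration is needed.
    resp, err := http.Get("http://" + master + "/")
    if err != nil {
        log.Fatalf("apiserver not reachable: %v", err)
    }
    defer resp.Body.Close()
    fmt.Println("apiserver responded:", resp.Status)
}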

PR #194

Add support for git volumes.

// execInterface allows command execution to be mocked in tests. Its signature
// matches GitDir.ExecCommand below (the excerpt originally declared it with a
// different, incompatible signature).
type execInterface interface {
    ExecCommand(command string, args []string, dir string) ([]byte, error)
}

type GitDir struct {
    Source   string
    Revision string
    PodID    string
    RootDir  string
    Name     string
    exec     exec.Interface
}

func newGitRepo(volume *api.Volume, podID, rootDir string) *GitDir {
    return &GitDir{
        Source:   volume.Source.GitRepo.Repository,
        Revision: volume.Source.GitRepo.Revision,
        PodID:    podID,
        RootDir:  rootDir,
        Name:     volume.Name,
        exec:     exec.New(),
    }
}

// ExecCommand runs the given command in dir and returns its combined output.
func (g *GitDir) ExecCommand(command string, args []string, dir string) ([]byte, error) {
    cmd := g.exec.Command(command, args...)
    cmd.SetDir(dir)
    return cmd.CombinedOutput()
}

// SetUp clones the repository into the volume directory and, if a revision was
// given, checks it out in the cloned subdirectory.
func (g *GitDir) SetUp() error {
    volumePath := g.GetPath()
    if err := os.MkdirAll(volumePath, 0750); err != nil {
        return err
    }
    if _, err := g.ExecCommand("git", []string{"clone", g.Source}, g.GetPath()); err != nil {
        return err
    }
    files, err := ioutil.ReadDir(g.GetPath())
    if err != nil {
        return err
    }
    if len(g.Revision) == 0 {
        return nil
    }

    // git clone creates exactly one subdirectory, named after the repository.
    if len(files) != 1 {
        return fmt.Errorf("Unexpected directory contents: %v", files)
    }
    dir := path.Join(g.GetPath(), files[0].Name())
    if _, err := g.ExecCommand("git", []string{"checkout", g.Revision}, dir); err != nil {
        return err
    }
    if _, err := g.ExecCommand("git", []string{"reset", "--hard"}, dir); err != nil {
        return err
    }
    return nil
}

// GetPath returns the directory the git volume is mounted from.
func (g *GitDir) GetPath() string {
    return path.Join(g.RootDir, g.PodID, "volumes", "git", g.Name)
}

// TearDown simply deletes everything in the directory.
func (g *GitDir) TearDown() error {
    tmpDir, err := renameDirectory(g.GetPath(), g.Name+"~deleting")
    if err != nil {
        return err
    }
    err = os.RemoveAll(tmpDir)
    if err != nil {
        return err
    }
    return nil
}

Kubernetes supports a wide variety of volume types, including gitRepo. A gitRepo volume mounts a Git repository into a directory inside the Pod automatically: when the Pod starts, Kubernetes runs git clone to pull the repository into that directory.

After Kubernetes 1.9, gitRepo was deprecated; to pull code, using initContainers is the recommended way to achieve a similar effect.
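
For illustration, the clone-then-checkout sequence that SetUp performs can be reproduced outside the kubelet with the standard library alone. The sketch below uses os/exec directly instead of the exec.Interface wrapper; cloneAndCheckout and its parameters are illustrative names, and git must be on the PATH.

package main

import (
    "fmt"
    "log"
    "os"
    "os/exec"
    "path/filepath"
)

// cloneAndCheckout mirrors GitDir.SetUp: clone the repository into volumePath,
// then optionally check out a specific revision in the cloned subdirectory.
func cloneAndCheckout(repo, revision, volumePath string) error {
    if err := os.MkdirAll(volumePath, 0750); err != nil {
        return err
    }
    clone := exec.Command("git", "clone", repo)
    clone.Dir = volumePath
    if out, err := clone.CombinedOutput(); err != nil {
        return fmt.Errorf("git clone failed: %v\n%s", err, out)
    }
    if revision == "" {
        return nil
    }
    // git clone creates exactly one subdirectory, named after the repository.
    entries, err := os.ReadDir(volumePath)
    if err != nil {
        return err
    }
    if len(entries) != 1 {
        return fmt.Errorf("unexpected directory contents: %v", entries)
    }
    repoDir := filepath.Join(volumePath, entries[0].Name())
    for _, args := range [][]string{{"checkout", revision}, {"reset", "--hard"}} {
        cmd := exec.Command("git", args...)
        cmd.Dir = repoDir
        if out, err := cmd.CombinedOutput(); err != nil {
            return fmt.Errorf("git %s failed: %v\n%s", args[0], err, out)
        }
    }
    return nil
}

func main() {
    if len(os.Args) < 2 {
        log.Fatal("usage: gitclone <repository-url> [revision]")
    }
    revision := ""
    if len(os.Args) > 2 {
        revision = os.Args[2]
    }
    if err := cloneAndCheckout(os.Args[1], revision, "./git-volume-demo"); err != nil {
        log.Fatal(err)
    }
    log.Println("repository ready under ./git-volume-demo")
}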

PR #1782

Allow clients to determine the difference between create and update on PUT

// MakeAsync takes a function and executes it, delivering the result in the way required
// by RESTStorage's Update, Delete, and Create methods.
func MakeAsync(fn WorkFunc) <-chan RESTResult {
    channel := make(chan RESTResult)
    go func() {
        defer util.HandleCrash()
        obj, err := fn()
        if err != nil {
            channel <- RESTResult{Object: errToAPIStatus(err)}
        } else {
            channel <- RESTResult{Object: obj}
        }
        // 'close' is used to signal that no further values will
        // be written to the channel. Not strictly necessary, but
        // also won't hurt.
        close(channel)
    }()
    return channel
}
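
The caller of a storage method receives the channel immediately and decides how long to wait for the result. Below is a standalone sketch of the same pattern with simplified stand-in types (result and makeAsync here are illustrative, not the real apiserver types), plus a caller that uses select to bound how long it blocks.

package main

import (
    "fmt"
    "time"
)

// result plays the role of RESTResult: the eventual outcome of a storage operation.
type result struct {
    Object interface{}
}

// makeAsync mirrors the pattern above: run fn in a goroutine and deliver its
// single result (value or error) on a channel, then close the channel.
// A buffer of 1 means the goroutine never blocks if the caller stops listening.
func makeAsync(fn func() (interface{}, error)) <-chan result {
    ch := make(chan result, 1)
    go func() {
        obj, err := fn()
        if err != nil {
            ch <- result{Object: err}
        } else {
            ch <- result{Object: obj}
        }
        close(ch)
    }()
    return ch
}

func main() {
    ch := makeAsync(func() (interface{}, error) {
        time.Sleep(10 * time.Millisecond) // stand-in for a slow create or update
        return "created", nil
    })

    // The caller chooses the policy: block until done, or give up after a deadline.
    select {
    case r := <-ch:
        fmt.Println("operation finished:", r.Object)
    case <-time.After(time.Second):
        fmt.Println("operation still pending")
    }
}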