Skip to content

Commit e49e4d1

Browse files
authored
Merge pull request #56 from sp-yduck/feature/multi-node
Support multi node proxmox (use websocket instead of ssh)
2 parents 1d70f39 + d77607b commit e49e4d1

File tree

20 files changed

+135
-275
lines changed

20 files changed

+135
-275
lines changed

README.md

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -24,15 +24,10 @@ clusterctl init --infrastructure=proxmox:v0.2.3 --config https://raw.githubuserc
2424
2. Create your first workload cluster
2525
```sh
2626
# export env variables
27-
export CONTROLPLANE_HOST=X.X.X.X # control-plane vip
27+
export CONTROLPLANE_HOST=X.X.X.X # control-plane vip
2828
export PROXMOX_URL=https://X.X.X.X:8006/api2/json
29-
# export PROXMOX_PASSWORD=password # (optional)
30-
# export PROXMOX_USER=user@pam # (optional)
31-
export PROXMOX_TOKENID='root@pam!api-token-id' # (optional)
32-
export PROXMOX_SECRET=aaaaaaaa-bbbb-cccc-dddd-ee12345678 # (optional)
33-
export NODE_URL=node.ssh.url:22
34-
export NODE_USER=node-ssh-user
35-
export NODE_PASSWORD=node-ssh-password
29+
export PROXMOX_PASSWORD=password
30+
export PROXMOX_USER=user@pam
3631

3732
# generate manifests (available flags: --target-namespace, --kubernetes-version, --control-plane-machine-count, --worker-machine-count)
3833
clusterctl generate cluster cappx-test --control-plane-machine-count=3 --infrastructure=proxmox:v0.2.3 --config https://raw.githubusercontent.com/sp-yduck/cluster-api-provider-proxmox/main/clusterctl.yaml > cappx-test.yaml
@@ -62,9 +57,11 @@ kubectl delete cluster cappx-test
6257

6358
## Features
6459

65-
- No need to prepare vm templates. You can specify any vm image in `ProxmoxMachine.Spec.Image`.
60+
- No need to prepare VM templates. You can specify any VM image in `ProxmoxMachine.Spec.Image`. CAPPX bootstraps your VM from scratch.
6661

67-
- Supports custom cloud-config (user data). CAPPX uses ssh for bootstrapping nodes so it can applies custom cloud-config that can not be achieved by only Proxmox API.
62+
- Supports the qcow2 image format. CAPPX uses a VNC websocket for downloading/installing node images, so it can support raw image formats rather than only ISO (the Proxmox API supports only ISO).
63+
64+
- Supports custom cloud-config (user data). CAPPX uses a VNC websocket for bootstrapping nodes, so it can apply custom cloud-config that cannot be achieved through the Proxmox API alone.
6865

6966
### Node Images
7067

@@ -99,7 +96,7 @@ This project aims to follow the Cluster API [Provider contract](https://cluster-
9996

10097
### ProxmoxCluster
10198

102-
Because Proxmox-VE does not provide LBaaS solution, CAPPX does not follow the [typical infra-cluster logic](https://cluster-api.sigs.k8s.io/developer/providers/cluster-infrastructure.html#behavior). ProxmoxCluster controller reconciles only Proxmox storages used for instances. You need to prepare control plane load balancer by yourself if you creates HA control plane workload cluster.
99+
Because Proxmox-VE does not provide an LBaaS solution, CAPPX does not follow the [typical infra-cluster logic](https://cluster-api.sigs.k8s.io/developer/providers/cluster-infrastructure.html#behavior). The ProxmoxCluster controller reconciles only the Proxmox storages used for instances. You need to prepare a control plane load balancer yourself if you create an HA control plane workload cluster. In the [cluster-template.yaml](./templates/cluster-template.yaml), you can find an HA control plane example with [kube-vip](https://github.com/kube-vip/kube-vip).
103100

104101
### ProxmoxMachine
105102

api/v1beta1/proxmoxcluster_types.go

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,6 @@ type ProxmoxClusterSpec struct {
3838
// ServerRef is used for configuring Proxmox client
3939
ServerRef ServerRef `json:"serverRef"`
4040

41-
// NodesRef contains reference of nodes used for ProxmoxCluster
42-
NodeRefs []NodeRef `json:"nodeRefs,omitempty"`
43-
4441
// storage is for proxmox storage used by vm instances
4542
// +optional
4643
Storage Storage `json:"storage"`

api/v1beta1/zz_generated.deepcopy.go

Lines changed: 0 additions & 7 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

cloud/cloudinit/user.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,9 +25,9 @@ func GenerateUserYaml(config infrav1.User) (string, error) {
2525
return fmt.Sprintf("#cloud-config\n%s", string(b)), nil
2626
}
2727

28-
func MergeUsers(a, b infrav1.User) (*infrav1.User, error) {
29-
if err := mergo.Merge(&a, b, mergo.WithAppendSlice); err != nil {
28+
func MergeUsers(a, b *infrav1.User) (*infrav1.User, error) {
29+
if err := mergo.Merge(a, b, mergo.WithAppendSlice); err != nil {
3030
return nil, err
3131
}
32-
return &a, nil
32+
return a, nil
3333
}

cloud/cloudinit/user_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ func TestMergeUsers(t *testing.T) {
6767
User: "override-user",
6868
RunCmd: []string{"command A", "command B", "command C"},
6969
}
70-
c, err := cloudinit.MergeUsers(a, b)
70+
c, err := cloudinit.MergeUsers(&a, &b)
7171
if err != nil {
7272
t.Errorf("failed to merge cloud init user data: %v", err)
7373
}

cloud/interfaces.go

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ import (
77
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
88

99
infrav1 "github.com/sp-yduck/cluster-api-provider-proxmox/api/v1beta1"
10-
"github.com/sp-yduck/cluster-api-provider-proxmox/cloud/scope"
1110
)
1211

1312
type Reconciler interface {
@@ -17,7 +16,6 @@ type Reconciler interface {
1716

1817
type Client interface {
1918
CloudClient() *proxmox.Service
20-
RemoteClient() *scope.SSHClient
2119
}
2220

2321
type Cluster interface {

cloud/scope/clients.go

Lines changed: 0 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@ import (
2929

3030
type ProxmoxServices struct {
3131
Compute *proxmox.Service
32-
Remote *SSHClient
3332
}
3433

3534
func newComputeService(ctx context.Context, serverRef infrav1.ServerRef, crClient client.Client) (*proxmox.Service, error) {
@@ -53,30 +52,3 @@ func newComputeService(ctx context.Context, serverRef infrav1.ServerRef, crClien
5352

5453
return proxmox.NewService(serverRef.Endpoint, authConfig, true)
5554
}
56-
57-
func newRemoteClient(ctx context.Context, secretRef *infrav1.ObjectReference, crClient client.Client) (*SSHClient, error) {
58-
if secretRef == nil {
59-
return nil, errors.New("failed to get proxmox client form nil secretRef")
60-
}
61-
62-
var secret corev1.Secret
63-
key := client.ObjectKey{Namespace: secretRef.Namespace, Name: secretRef.Name}
64-
if err := crClient.Get(ctx, key, &secret); err != nil {
65-
return nil, err
66-
}
67-
68-
nodeurl, ok := secret.Data["NODE_URL"]
69-
if !ok {
70-
return nil, errors.Errorf("failed to fetch NODE_URL from Secret : %v", key)
71-
}
72-
nodeuser, ok := secret.Data["NODE_USER"]
73-
if !ok {
74-
return nil, errors.Errorf("failed to fetch PROXMOX_USER from Secret : %v", key)
75-
}
76-
nodepassword, ok := secret.Data["NODE_PASSWORD"]
77-
if !ok {
78-
return nil, errors.Errorf("failed to fetch PROXMOX_PASSWORD from Secret : %v", key)
79-
}
80-
81-
return NewSSHClient(string(nodeurl), string(nodeuser), string(nodepassword))
82-
}

cloud/scope/cluster.go

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,6 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
5252
params.ProxmoxServices.Compute = computeSvc
5353
}
5454

55-
if params.ProxmoxServices.Remote == nil {
56-
// current CAPPX is compatible with only single node proxmox cluster
57-
remote, err := newRemoteClient(ctx, params.ProxmoxCluster.Spec.NodeRefs[0].SecretRef, params.Client)
58-
if err != nil {
59-
return nil, errors.Errorf("failed to create remote client: %v", err)
60-
}
61-
params.ProxmoxServices.Remote = remote
62-
}
63-
6455
helper, err := patch.NewHelper(params.ProxmoxCluster, params.Client)
6556
if err != nil {
6657
return nil, errors.Wrap(err, "failed to init patch helper")
@@ -79,11 +70,6 @@ func populateNamespace(proxmoxCluster *infrav1.ProxmoxCluster) {
7970
if proxmoxCluster.Spec.ServerRef.SecretRef.Namespace == "" {
8071
proxmoxCluster.Spec.ServerRef.SecretRef.Namespace = proxmoxCluster.Namespace
8172
}
82-
for i, nodeRef := range proxmoxCluster.Spec.NodeRefs {
83-
if nodeRef.SecretRef.Namespace == "" {
84-
proxmoxCluster.Spec.NodeRefs[i].SecretRef.Namespace = proxmoxCluster.Namespace
85-
}
86-
}
8773
}
8874

8975
type ClusterScope struct {
@@ -114,10 +100,6 @@ func (s *ClusterScope) CloudClient() *proxmox.Service {
114100
return s.ProxmoxServices.Compute
115101
}
116102

117-
func (s *ClusterScope) RemoteClient() *SSHClient {
118-
return s.ProxmoxServices.Remote
119-
}
120-
121103
func (s *ClusterScope) Close() error {
122104
return s.PatchObject()
123105
}

cloud/scope/machine.go

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,6 @@ func (m *MachineScope) CloudClient() *proxmox.Service {
8282
return m.ClusterGetter.CloudClient()
8383
}
8484

85-
func (m *MachineScope) RemoteClient() *SSHClient {
86-
return m.ClusterGetter.Remote
87-
}
88-
8985
func (m *MachineScope) GetStorage() infrav1.Storage {
9086
return m.ClusterGetter.ProxmoxCluster.Spec.Storage
9187
}

cloud/scope/remote.go

Lines changed: 0 additions & 71 deletions
This file was deleted.

0 commit comments

Comments
 (0)