From fd8244699b83e8cae54d155cd54437b49d5bccee Mon Sep 17 00:00:00 2001 From: Vitaliy Filippov Date: Sun, 16 May 2021 01:15:43 +0300 Subject: [PATCH] Implement basic CSI driver The driver can currently create and remove volumes, but resizing and snapshots are not supported yet --- csi/.dockerignore | 3 + csi/Dockerfile | 32 ++ csi/Makefile | 9 + csi/deploy/000-csi-namespace.yaml | 5 + csi/deploy/001-csi-config-map.yaml | 9 + csi/deploy/002-csi-nodeplugin-rbac.yaml | 37 ++ csi/deploy/003-csi-nodeplugin-psp.yaml | 72 +++ csi/deploy/004-csi-nodeplugin.yaml | 140 ++++++ csi/deploy/005-csi-provisioner-rbac.yaml | 102 +++++ csi/deploy/006-csi-provisioner-psp.yaml | 60 +++ csi/deploy/007-csi-provisioner.yaml | 159 +++++++ csi/deploy/008-csi-driver.yaml | 11 + csi/deploy/example-pvc.yaml | 12 + csi/deploy/storage-class.yaml | 19 + csi/go.mod | 35 ++ csi/src/config.go | 22 + csi/src/controllerserver.go | 530 +++++++++++++++++++++++ csi/src/grpc.go | 137 ++++++ csi/src/identityserver.go | 60 +++ csi/src/nodeserver.go | 279 ++++++++++++ csi/src/server.go | 36 ++ csi/vitastor-csi.go | 39 ++ 22 files changed, 1808 insertions(+) create mode 100644 csi/.dockerignore create mode 100644 csi/Dockerfile create mode 100644 csi/Makefile create mode 100644 csi/deploy/000-csi-namespace.yaml create mode 100644 csi/deploy/001-csi-config-map.yaml create mode 100644 csi/deploy/002-csi-nodeplugin-rbac.yaml create mode 100644 csi/deploy/003-csi-nodeplugin-psp.yaml create mode 100644 csi/deploy/004-csi-nodeplugin.yaml create mode 100644 csi/deploy/005-csi-provisioner-rbac.yaml create mode 100644 csi/deploy/006-csi-provisioner-psp.yaml create mode 100644 csi/deploy/007-csi-provisioner.yaml create mode 100644 csi/deploy/008-csi-driver.yaml create mode 100644 csi/deploy/example-pvc.yaml create mode 100644 csi/deploy/storage-class.yaml create mode 100644 csi/go.mod create mode 100644 csi/src/config.go create mode 100644 csi/src/controllerserver.go create mode 100644 csi/src/grpc.go create mode 100644 csi/src/identityserver.go create mode 100644 csi/src/nodeserver.go create mode 100644 csi/src/server.go create mode 100644 csi/vitastor-csi.go diff --git a/csi/.dockerignore b/csi/.dockerignore new file mode 100644 index 000000000..2d1d3baba --- /dev/null +++ b/csi/.dockerignore @@ -0,0 +1,3 @@ +vitastor-csi +go.sum +Dockerfile diff --git a/csi/Dockerfile b/csi/Dockerfile new file mode 100644 index 000000000..eea29631c --- /dev/null +++ b/csi/Dockerfile @@ -0,0 +1,32 @@ +# Compile stage +FROM golang:buster AS build + +ADD go.mod /app/ +RUN cd /app; CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go mod download -x +ADD . 
/app +RUN perl -i -e '$/ = undef; while(<>) { s/\n\s*(\{\s*\n)/$1\n/g; s/\}(\s*\n\s*)else\b/$1} else/g; print; }' `find /app -name '*.go'` +RUN cd /app; CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o vitastor-csi + +# Final stage +FROM debian:buster + +LABEL maintainers="Vitaliy Filippov " +LABEL description="Vitastor CSI Driver" + +ENV NODE_ID="" +ENV CSI_ENDPOINT="" + +RUN apt-get update && \ + apt-get install -y wget && \ + wget -q -O /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg && \ + (echo deb http://vitastor.io/debian buster main > /etc/apt/sources.list.d/vitastor.list) && \ + (echo deb http://deb.debian.org/debian buster-backports main > /etc/apt/sources.list.d/backports.list) && \ + (echo "APT::Install-Recommends false;" > /etc/apt/apt.conf) && \ + apt-get update && \ + apt-get install -y e2fsprogs xfsprogs vitastor kmod && \ + apt-get clean && \ + (echo options nbd nbds_max=128 > /etc/modprobe.d/nbd.conf) + +COPY --from=build /app/vitastor-csi /bin/ + +ENTRYPOINT ["/bin/vitastor-csi"] diff --git a/csi/Makefile b/csi/Makefile new file mode 100644 index 000000000..726f71c49 --- /dev/null +++ b/csi/Makefile @@ -0,0 +1,9 @@ +VERSION ?= v0.6.3 + +all: build push + +build: + @docker build --rm -t vitalif/vitastor-csi:$(VERSION) . + +push: + @docker push vitalif/vitastor-csi:$(VERSION) diff --git a/csi/deploy/000-csi-namespace.yaml b/csi/deploy/000-csi-namespace.yaml new file mode 100644 index 000000000..786b65532 --- /dev/null +++ b/csi/deploy/000-csi-namespace.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: vitastor-system diff --git a/csi/deploy/001-csi-config-map.yaml b/csi/deploy/001-csi-config-map.yaml new file mode 100644 index 000000000..7a39523b5 --- /dev/null +++ b/csi/deploy/001-csi-config-map.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ConfigMap +data: + vitastor.conf: |- + {"etcd_address":"http://192.168.7.2:2379","etcd_prefix":"/vitastor"} +metadata: + namespace: vitastor-system + name: vitastor-config diff --git a/csi/deploy/002-csi-nodeplugin-rbac.yaml b/csi/deploy/002-csi-nodeplugin-rbac.yaml new file mode 100644 index 000000000..de5bd721d --- /dev/null +++ b/csi/deploy/002-csi-nodeplugin-rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow to read Vault Token and connection options from the Tenants namespace + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: vitastor-csi-nodeplugin + namespace: vitastor-system +roleRef: + kind: ClusterRole + name: vitastor-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io diff --git a/csi/deploy/003-csi-nodeplugin-psp.yaml b/csi/deploy/003-csi-nodeplugin-psp.yaml new file mode 100644 index 000000000..b0d601974 --- /dev/null +++ b/csi/deploy/003-csi-nodeplugin-psp.yaml @@ -0,0 +1,72 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin-psp +spec: + allowPrivilegeEscalation: true + allowedCapabilities: + - 'SYS_ADMIN' + fsGroup: + 
rule: RunAsAny + privileged: true + hostNetwork: true + hostPID: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'hostPath' + allowedHostPaths: + - pathPrefix: '/dev' + readOnly: false + - pathPrefix: '/run/mount' + readOnly: false + - pathPrefix: '/sys' + readOnly: false + - pathPrefix: '/lib/modules' + readOnly: true + - pathPrefix: '/var/lib/kubelet/pods' + readOnly: false + - pathPrefix: '/var/lib/kubelet/plugins/csi.vitastor.io' + readOnly: false + - pathPrefix: '/var/lib/kubelet/plugins_registry' + readOnly: false + - pathPrefix: '/var/lib/kubelet/plugins' + readOnly: false + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin-psp +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['vitastor-csi-nodeplugin-psp'] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-nodeplugin-psp +subjects: + - kind: ServiceAccount + name: vitastor-csi-nodeplugin + namespace: vitastor-system +roleRef: + kind: Role + name: vitastor-csi-nodeplugin-psp + apiGroup: rbac.authorization.k8s.io diff --git a/csi/deploy/004-csi-nodeplugin.yaml b/csi/deploy/004-csi-nodeplugin.yaml new file mode 100644 index 000000000..5dcf2cc63 --- /dev/null +++ b/csi/deploy/004-csi-nodeplugin.yaml @@ -0,0 +1,140 @@ +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + namespace: vitastor-system + name: csi-vitastor +spec: + selector: + matchLabels: + app: csi-vitastor + template: + metadata: + namespace: vitastor-system + labels: + app: csi-vitastor + spec: + serviceAccountName: vitastor-csi-nodeplugin + hostNetwork: true + hostPID: true + priorityClassName: system-node-critical + # to use e.g. Rook orchestrated cluster, and mons' FQDN is + # resolved through k8s service, set dns policy to cluster first + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: driver-registrar + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. 
+ securityContext: + privileged: true + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 + args: + - "--v=5" + - "--csi-address=/csi/csi.sock" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.vitastor.io/csi.sock" + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: csi-vitastor + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + image: vitalif/vitastor-csi:v0.6.3 + args: + - "--node=$(NODE_ID)" + - "--endpoint=$(CSI_ENDPOINT)" + env: + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + volumeMounts: + - name: socket-dir + mountPath: /csi + - mountPath: /dev + name: host-dev + - mountPath: /sys + name: host-sys + - mountPath: /run/mount + name: host-mount + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - name: vitastor-config + mountPath: /etc/vitastor + - name: plugin-dir + mountPath: /var/lib/kubelet/plugins + mountPropagation: "Bidirectional" + - name: mountpoint-dir + mountPath: /var/lib/kubelet/pods + mountPropagation: "Bidirectional" + - name: liveness-probe + securityContext: + privileged: true + image: quay.io/k8scsi/livenessprobe:v1.1.0 + args: + - "--csi-address=$(CSI_ENDPOINT)" + - "--health-port=9898" + env: + - name: CSI_ENDPOINT + value: unix://csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.vitastor.io + type: DirectoryOrCreate + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins + type: Directory + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: host-dev + hostPath: + path: /dev + - name: host-sys + hostPath: + path: /sys + - name: host-mount + hostPath: + path: /run/mount + - name: lib-modules + hostPath: + path: /lib/modules + - name: vitastor-config + configMap: + name: vitastor-config diff --git a/csi/deploy/005-csi-provisioner-rbac.yaml b/csi/deploy/005-csi-provisioner-rbac.yaml new file mode 100644 index 000000000..d289dc7ed --- /dev/null +++ b/csi/deploy/005-csi-provisioner-rbac.yaml @@ -0,0 +1,102 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: vitastor-system + name: vitastor-csi-provisioner + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", 
"patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: vitastor-csi-provisioner + namespace: vitastor-system +roleRef: + kind: ClusterRole + name: vitastor-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-external-provisioner-cfg +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vitastor-csi-provisioner-role-cfg + namespace: vitastor-system +subjects: + - kind: ServiceAccount + name: vitastor-csi-provisioner + namespace: vitastor-system +roleRef: + kind: Role + name: vitastor-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io diff --git a/csi/deploy/006-csi-provisioner-psp.yaml b/csi/deploy/006-csi-provisioner-psp.yaml new file mode 100644 index 000000000..7cb812c8f --- /dev/null +++ b/csi/deploy/006-csi-provisioner-psp.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + namespace: vitastor-system + name: vitastor-csi-provisioner-psp +spec: + allowPrivilegeEscalation: true + allowedCapabilities: + - 'SYS_ADMIN' + fsGroup: + rule: RunAsAny + privileged: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'hostPath' + allowedHostPaths: + - pathPrefix: '/dev' + readOnly: false + - pathPrefix: '/sys' + readOnly: false + - pathPrefix: '/lib/modules' + readOnly: true + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: vitastor-system + name: vitastor-csi-provisioner-psp +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['vitastor-csi-provisioner-psp'] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: vitastor-csi-provisioner-psp + namespace: vitastor-system +subjects: + - kind: ServiceAccount + name: vitastor-csi-provisioner + namespace: vitastor-system +roleRef: + kind: Role + name: vitastor-csi-provisioner-psp + apiGroup: rbac.authorization.k8s.io diff --git 
a/csi/deploy/007-csi-provisioner.yaml b/csi/deploy/007-csi-provisioner.yaml new file mode 100644 index 000000000..07a721ea6 --- /dev/null +++ b/csi/deploy/007-csi-provisioner.yaml @@ -0,0 +1,159 @@ +--- +kind: Service +apiVersion: v1 +metadata: + namespace: vitastor-system + name: csi-vitastor-provisioner + labels: + app: csi-metrics +spec: + selector: + app: csi-vitastor-provisioner + ports: + - name: http-metrics + port: 8080 + protocol: TCP + targetPort: 8680 + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + namespace: vitastor-system + name: csi-vitastor-provisioner +spec: + replicas: 3 + selector: + matchLabels: + app: csi-vitastor-provisioner + template: + metadata: + namespace: vitastor-system + labels: + app: csi-vitastor-provisioner + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-vitastor-provisioner + topologyKey: "kubernetes.io/hostname" + serviceAccountName: vitastor-csi-provisioner + priorityClassName: system-cluster-critical + containers: + - name: csi-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.0 + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=150s" + - "--retry-interval-start=500ms" + - "--leader-election=true" + # set it to true to use topology based provisioning + - "--feature-gates=Topology=false" + # if fstype is not specified in storageclass, ext4 is default + - "--default-fstype=ext4" + - "--extra-create-metadata=true" + env: + - name: ADDRESS + value: unix:///csi/csi-provisioner.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-snapshotter + image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.0.0 + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=150s" + - "--leader-election=true" + env: + - name: ADDRESS + value: unix:///csi/csi-provisioner.sock + imagePullPolicy: "IfNotPresent" + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-attacher + image: k8s.gcr.io/sig-storage/csi-attacher:v3.1.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--leader-election=true" + - "--retry-interval-start=500ms" + env: + - name: ADDRESS + value: /csi/csi-provisioner.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + image: k8s.gcr.io/sig-storage/csi-resizer:v1.1.0 + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=150s" + - "--leader-election" + - "--retry-interval-start=500ms" + - "--handle-volume-inuse-error=false" + env: + - name: ADDRESS + value: unix:///csi/csi-provisioner.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-vitastor + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + image: vitalif/vitastor-csi:v0.6.3 + args: + - "--node=$(NODE_ID)" + - "--endpoint=$(CSI_ENDPOINT)" + env: + - name: NODE_ID + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi-provisioner.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + - mountPath: /dev + name: host-dev + - mountPath: /sys + name: host-sys + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - name: vitastor-config + mountPath: /etc/vitastor + volumes: + - name: host-dev + hostPath: + path: /dev + - name: host-sys + hostPath: + path: /sys + - name: lib-modules + 
hostPath: + path: /lib/modules + - name: socket-dir + emptyDir: { + medium: "Memory" + } + - name: vitastor-config + configMap: + name: vitastor-config diff --git a/csi/deploy/008-csi-driver.yaml b/csi/deploy/008-csi-driver.yaml new file mode 100644 index 000000000..747283b49 --- /dev/null +++ b/csi/deploy/008-csi-driver.yaml @@ -0,0 +1,11 @@ +--- +# if the Kubernetes version is older than 1.18, change +# apiVersion to storage.k8s.io/v1beta1 +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + namespace: vitastor-system + name: csi.vitastor.io +spec: + attachRequired: true + podInfoOnMount: false diff --git a/csi/deploy/example-pvc.yaml b/csi/deploy/example-pvc.yaml new file mode 100644 index 000000000..fc9c3171e --- /dev/null +++ b/csi/deploy/example-pvc.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: test-vitastor-pvc +spec: + storageClassName: vitastor-csi-storage-class + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi diff --git a/csi/deploy/storage-class.yaml b/csi/deploy/storage-class.yaml new file mode 100644 index 000000000..fcf46286e --- /dev/null +++ b/csi/deploy/storage-class.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + namespace: vitastor-system + name: vitastor-csi-storage-class + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: csi.vitastor.io +volumeBindingMode: Immediate +parameters: + etcdVolumePrefix: "" + poolId: "1" + # you can choose another configuration file if you have one in the config map + #configPath: "/etc/vitastor/vitastor.conf" + # you can also specify etcdUrl here, for example to connect to another Vitastor cluster + # multiple etcd URLs may be specified, delimited by commas + #etcdUrl: "http://192.168.7.2:2379" + #etcdPrefix: "/vitastor" diff --git a/csi/go.mod b/csi/go.mod new file mode 100644 index 000000000..7c9143e3e --- /dev/null +++ b/csi/go.mod @@ -0,0 +1,35 @@ +module vitastor.io/csi + +go 1.15 + +require ( + github.com/container-storage-interface/spec v1.4.0 + github.com/coreos/bbolt v0.0.0-00010101000000-000000000000 // indirect + github.com/coreos/etcd v3.3.25+incompatible // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/gorilla/websocket v1.4.2 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/kubernetes-csi/csi-lib-utils v0.9.1 + github.com/soheilhy/cmux v0.1.5 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + go.etcd.io/bbolt v0.0.0-00010101000000-000000000000 // indirect + go.etcd.io/etcd v3.3.25+incompatible + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb + google.golang.org/grpc v1.33.1 + k8s.io/klog v1.0.0 + k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 +) + +replace github.com/coreos/bbolt => go.etcd.io/bbolt v1.3.5 + +replace go.etcd.io/bbolt => github.com/coreos/bbolt v1.3.5 + +replace google.golang.org/grpc => google.golang.org/grpc v1.25.1 diff --git a/csi/src/config.go 
b/csi/src/config.go new file mode 100644 index 000000000..024afde13 --- /dev/null +++ b/csi/src/config.go @@ -0,0 +1,22 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package vitastor + +const ( + vitastorCSIDriverName = "csi.vitastor.io" + vitastorCSIDriverVersion = "0.6.3" +) + +// Config struct fills the parameters of request or user input +type Config struct +{ + Endpoint string + NodeID string +} + +// NewConfig returns config struct to initialize new driver +func NewConfig() *Config +{ + return &Config{} +} diff --git a/csi/src/controllerserver.go b/csi/src/controllerserver.go new file mode 100644 index 000000000..78b199bfd --- /dev/null +++ b/csi/src/controllerserver.go @@ -0,0 +1,530 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package vitastor + +import ( + "context" + "encoding/json" + "strings" + "bytes" + "strconv" + "time" + "fmt" + "os" + "os/exec" + "io/ioutil" + + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + "k8s.io/klog" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.etcd.io/etcd/clientv3" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +const ( + KB int64 = 1024 + MB int64 = 1024 * KB + GB int64 = 1024 * MB + TB int64 = 1024 * GB + ETCD_TIMEOUT time.Duration = 15*time.Second +) + +type InodeIndex struct +{ + Id uint64 `json:"id"` + PoolId uint64 `json:"pool_id"` +} + +type InodeConfig struct +{ + Name string `json:"name"` + Size uint64 `json:"size,omitempty"` + ParentPool uint64 `json:"parent_pool,omitempty"` + ParentId uint64 `json:"parent_id,omitempty"` + Readonly bool `json:"readonly,omitempty"` +} + +type ControllerServer struct +{ + *Driver +} + +// NewControllerServer create new instance controller +func NewControllerServer(driver *Driver) *ControllerServer +{ + return &ControllerServer{ + Driver: driver, + } +} + +func GetConnectionParams(params map[string]string) (map[string]string, []string, string) +{ + ctxVars := make(map[string]string) + configPath := params["configPath"] + if (configPath == "") + { + configPath = "/etc/vitastor/vitastor.conf" + } + else + { + ctxVars["configPath"] = configPath + } + config := make(map[string]interface{}) + if configFD, err := os.Open(configPath); err == nil + { + defer configFD.Close() + data, _ := ioutil.ReadAll(configFD) + json.Unmarshal(data, &config) + } + // Try to load prefix & etcd URL from the config + var etcdUrl []string + if (params["etcdUrl"] != "") + { + ctxVars["etcdUrl"] = params["etcdUrl"] + etcdUrl = strings.Split(params["etcdUrl"], ",") + } + if (len(etcdUrl) == 0) + { + switch config["etcd_address"].(type) + { + case string: + etcdUrl = strings.Split(config["etcd_address"].(string), ",") + case []string: + etcdUrl = config["etcd_address"].([]string) + } + } + etcdPrefix := params["etcdPrefix"] + if (etcdPrefix == "") + { + etcdPrefix, _ = config["etcd_prefix"].(string) + if (etcdPrefix == "") + { + etcdPrefix = "/vitastor" + } + } + else + { + ctxVars["etcdPrefix"] = etcdPrefix + } + return ctxVars, etcdUrl, etcdPrefix +} + +// Create the volume +func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) +{ + klog.Infof("received controller create volume request %+v", protosanitizer.StripSecrets(req)) + if (req == nil) + { + return nil, status.Errorf(codes.InvalidArgument, "request cannot be empty") + } + if (req.GetName() == "") + { + return 
nil, status.Error(codes.InvalidArgument, "name is a required field") + } + volumeCapabilities := req.GetVolumeCapabilities() + if (volumeCapabilities == nil) + { + return nil, status.Error(codes.InvalidArgument, "volume capabilities is a required field") + } + + etcdVolumePrefix := req.Parameters["etcdVolumePrefix"] + poolId, _ := strconv.ParseUint(req.Parameters["poolId"], 10, 64) + if (poolId == 0) + { + return nil, status.Error(codes.InvalidArgument, "poolId is missing in storage class configuration") + } + + volName := etcdVolumePrefix + req.GetName() + volSize := 1 * GB + if capRange := req.GetCapacityRange(); capRange != nil + { + volSize = ((capRange.GetRequiredBytes() + MB - 1) / MB) * MB + } + + // FIXME: The following should PROBABLY be implemented externally in a management tool + + ctxVars, etcdUrl, etcdPrefix := GetConnectionParams(req.Parameters) + if (len(etcdUrl) == 0) + { + return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf") + } + + // Connect to etcd + cli, err := clientv3.New(clientv3.Config{ + DialTimeout: ETCD_TIMEOUT, + Endpoints: etcdUrl, + }) + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to connect to etcd at "+strings.Join(etcdUrl, ",")+": "+err.Error()) + } + defer cli.Close() + + var imageId uint64 = 0 + for + { + // Check if the image exists + ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT) + resp, err := cli.Get(ctx, etcdPrefix+"/index/image/"+volName) + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error()) + } + if (len(resp.Kvs) > 0) + { + kv := resp.Kvs[0] + var v InodeIndex + err := json.Unmarshal(kv.Value, &v) + if (err != nil) + { + return nil, status.Error(codes.Internal, "invalid /index/image/"+volName+" key in etcd: "+err.Error()) + } + poolId = v.PoolId + imageId = v.Id + inodeCfgKey := fmt.Sprintf("/config/inode/%d/%d", poolId, imageId) + ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT) + resp, err := cli.Get(ctx, etcdPrefix+inodeCfgKey) + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error()) + } + if (len(resp.Kvs) == 0) + { + return nil, status.Error(codes.Internal, "missing "+inodeCfgKey+" key in etcd") + } + var inodeCfg InodeConfig + err = json.Unmarshal(resp.Kvs[0].Value, &inodeCfg) + if (err != nil) + { + return nil, status.Error(codes.Internal, "invalid "+inodeCfgKey+" key in etcd: "+err.Error()) + } + if (inodeCfg.Size < uint64(volSize)) + { + return nil, status.Error(codes.Internal, "image "+volName+" is already created, but size is less than expected") + } + // The image already exists and is big enough, so use it + break + } + else + { + // Find a free ID + // Create image metadata in a transaction verifying that the image doesn't exist yet AND ID is still free + maxIdKey := fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId) + ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT) + resp, err := cli.Get(ctx, maxIdKey) + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error()) + } + var modRev int64 + var nextId uint64 + if (len(resp.Kvs) > 0) + { + var err error + nextId, err = strconv.ParseUint(string(resp.Kvs[0].Value), 10, 64) + if (err != nil) + { + return nil, status.Error(codes.Internal, maxIdKey+" contains invalid ID") + } + modRev = resp.Kvs[0].ModRevision + nextId++ + } + else + { + nextId = 1 + } + inodeIdxJson, _ := 
json.Marshal(InodeIndex{ + Id: nextId, + PoolId: poolId, + }) + inodeCfgJson, _ := json.Marshal(InodeConfig{ + Name: volName, + Size: uint64(volSize), + }) + ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT) + txnResp, err := cli.Txn(ctx).If( + clientv3.Compare(clientv3.ModRevision(fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId)), "=", modRev), + clientv3.Compare(clientv3.CreateRevision(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName)), "=", 0), + clientv3.Compare(clientv3.CreateRevision(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, poolId, nextId)), "=", 0), + ).Then( + clientv3.OpPut(fmt.Sprintf("%s/index/maxid/%d", etcdPrefix, poolId), fmt.Sprintf("%d", nextId)), + clientv3.OpPut(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName), string(inodeIdxJson)), + clientv3.OpPut(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, poolId, nextId), string(inodeCfgJson)), + ).Commit() + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to commit transaction in etcd: "+err.Error()) + } + if (txnResp.Succeeded) + { + imageId = nextId + break + } + // Start over if the transaction fails + } + } + + ctxVars["name"] = volName + volumeIdJson, _ := json.Marshal(ctxVars) + return &csi.CreateVolumeResponse{ + Volume: &csi.Volume{ + // Ugly, but VolumeContext isn't passed to DeleteVolume :-( + VolumeId: string(volumeIdJson), + CapacityBytes: volSize, + }, + }, nil +} + +// DeleteVolume deletes the given volume +func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) +{ + klog.Infof("received controller delete volume request %+v", protosanitizer.StripSecrets(req)) + if (req == nil) + { + return nil, status.Error(codes.InvalidArgument, "request cannot be empty") + } + + ctxVars := make(map[string]string) + err := json.Unmarshal([]byte(req.VolumeId), &ctxVars) + if (err != nil) + { + return nil, status.Error(codes.Internal, "volume ID not in JSON format") + } + volName := ctxVars["name"] + + _, etcdUrl, etcdPrefix := GetConnectionParams(ctxVars) + if (len(etcdUrl) == 0) + { + return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf") + } + + cli, err := clientv3.New(clientv3.Config{ + DialTimeout: ETCD_TIMEOUT, + Endpoints: etcdUrl, + }) + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to connect to etcd at "+strings.Join(etcdUrl, ",")+": "+err.Error()) + } + defer cli.Close() + + // Find inode by name + ctx, cancel := context.WithTimeout(context.Background(), ETCD_TIMEOUT) + resp, err := cli.Get(ctx, etcdPrefix+"/index/image/"+volName) + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error()) + } + if (len(resp.Kvs) == 0) + { + return nil, status.Error(codes.NotFound, "volume "+volName+" does not exist") + } + var idx InodeIndex + err = json.Unmarshal(resp.Kvs[0].Value, &idx) + if (err != nil) + { + return nil, status.Error(codes.Internal, "invalid /index/image/"+volName+" key in etcd: "+err.Error()) + } + + // Get inode config + inodeCfgKey := fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, idx.PoolId, idx.Id) + ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT) + resp, err = cli.Get(ctx, inodeCfgKey) + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to read key from etcd: "+err.Error()) + } + if (len(resp.Kvs) == 0) + { + return nil, status.Error(codes.NotFound, 
"volume "+volName+" does not exist") + } + var inodeCfg InodeConfig + err = json.Unmarshal(resp.Kvs[0].Value, &inodeCfg) + if (err != nil) + { + return nil, status.Error(codes.Internal, "invalid "+inodeCfgKey+" key in etcd: "+err.Error()) + } + + // Delete inode data by invoking vitastor-rm + args := []string{ + "--etcd_address", strings.Join(etcdUrl, ","), + "--pool", fmt.Sprintf("%d", idx.PoolId), + "--inode", fmt.Sprintf("%d", idx.Id), + } + if (ctxVars["configPath"] != "") + { + args = append(args, "--config_path", ctxVars["configPath"]) + } + c := exec.Command("/usr/bin/vitastor-rm", args...) + var stderr bytes.Buffer + c.Stdout = nil + c.Stderr = &stderr + err = c.Run() + stderrStr := string(stderr.Bytes()) + if (err != nil) + { + klog.Errorf("vitastor-rm failed: %s, status %s\n", stderrStr, err) + return nil, status.Error(codes.Internal, stderrStr+" (status "+err.Error()+")") + } + + // Delete inode config in etcd + ctx, cancel = context.WithTimeout(context.Background(), ETCD_TIMEOUT) + txnResp, err := cli.Txn(ctx).Then( + clientv3.OpDelete(fmt.Sprintf("%s/index/image/%s", etcdPrefix, volName)), + clientv3.OpDelete(fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, idx.PoolId, idx.Id)), + ).Commit() + cancel() + if (err != nil) + { + return nil, status.Error(codes.Internal, "failed to delete keys in etcd: "+err.Error()) + } + if (!txnResp.Succeeded) + { + return nil, status.Error(codes.Internal, "failed to delete keys in etcd: transaction failed") + } + + return &csi.DeleteVolumeResponse{}, nil +} + +// ControllerPublishVolume return Unimplemented error +func (cs *ControllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerUnpublishVolume return Unimplemented error +func (cs *ControllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ValidateVolumeCapabilities checks whether the volume capabilities requested are supported. 
+func (cs *ControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) +{ + klog.Infof("received controller validate volume capability request %+v", protosanitizer.StripSecrets(req)) + if (req == nil) + { + return nil, status.Errorf(codes.InvalidArgument, "request is nil") + } + volumeID := req.GetVolumeId() + if (volumeID == "") + { + return nil, status.Error(codes.InvalidArgument, "volumeId is nil") + } + volumeCapabilities := req.GetVolumeCapabilities() + if (volumeCapabilities == nil) + { + return nil, status.Error(codes.InvalidArgument, "volumeCapabilities is nil") + } + + var volumeCapabilityAccessModes []*csi.VolumeCapability_AccessMode + for _, mode := range []csi.VolumeCapability_AccessMode_Mode{ + csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + } { + volumeCapabilityAccessModes = append(volumeCapabilityAccessModes, &csi.VolumeCapability_AccessMode{Mode: mode}) + } + + capabilitySupport := false + for _, capability := range volumeCapabilities + { + for _, volumeCapabilityAccessMode := range volumeCapabilityAccessModes + { + if (volumeCapabilityAccessMode.Mode == capability.AccessMode.Mode) + { + capabilitySupport = true + } + } + } + + if (!capabilitySupport) + { + return nil, status.Errorf(codes.NotFound, "%v not supported", req.GetVolumeCapabilities()) + } + + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeCapabilities: req.VolumeCapabilities, + }, + }, nil +} + +// ListVolumes returns a list of volumes +func (cs *ControllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// GetCapacity returns the capacity of the storage pool +func (cs *ControllerServer) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerGetCapabilities returns the capabilities of the controller service. 
+func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) +{ + functionControllerServerCapabilities := func(cap csi.ControllerServiceCapability_RPC_Type) *csi.ControllerServiceCapability + { + return &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: cap, + }, + }, + } + } + + var controllerServerCapabilities []*csi.ControllerServiceCapability + for _, capability := range []csi.ControllerServiceCapability_RPC_Type{ + csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + } { + controllerServerCapabilities = append(controllerServerCapabilities, functionControllerServerCapabilities(capability)) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: controllerServerCapabilities, + }, nil +} + +// CreateSnapshot create snapshot of an existing PV +func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// DeleteSnapshot delete provided snapshot of a PV +func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ListSnapshots list the snapshots of a PV +func (cs *ControllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerExpandVolume resizes a volume +func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// ControllerGetVolume get volume info +func (cs *ControllerServer) ControllerGetVolume(ctx context.Context, req *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} diff --git a/csi/src/grpc.go b/csi/src/grpc.go new file mode 100644 index 000000000..9db30e277 --- /dev/null +++ b/csi/src/grpc.go @@ -0,0 +1,137 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vitastor + +import ( + "fmt" + "net" + "os" + "strings" + "sync" + + "github.com/golang/glog" + "golang.org/x/net/context" + "google.golang.org/grpc" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" +) + +// Defines Non blocking GRPC server interfaces +type NonBlockingGRPCServer interface { + // Start services at the endpoint + Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) + // Waits for the service to stop + Wait() + // Stops the service gracefully + Stop() + // Stops the service forcefully + ForceStop() +} + +func NewNonBlockingGRPCServer() NonBlockingGRPCServer { + return &nonBlockingGRPCServer{} +} + +// NonBlocking server +type nonBlockingGRPCServer struct { + wg sync.WaitGroup + server *grpc.Server +} + +func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { + + s.wg.Add(1) + + go s.serve(endpoint, ids, cs, ns) + + return +} + +func (s *nonBlockingGRPCServer) Wait() { + s.wg.Wait() +} + +func (s *nonBlockingGRPCServer) Stop() { + s.server.GracefulStop() +} + +func (s *nonBlockingGRPCServer) ForceStop() { + s.server.Stop() +} + +func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { + + proto, addr, err := ParseEndpoint(endpoint) + if err != nil { + glog.Fatal(err.Error()) + } + + if proto == "unix" { + addr = "/" + addr + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + glog.Fatalf("Failed to remove %s, error: %s", addr, err.Error()) + } + } + + listener, err := net.Listen(proto, addr) + if err != nil { + glog.Fatalf("Failed to listen: %v", err) + } + + opts := []grpc.ServerOption{ + grpc.UnaryInterceptor(logGRPC), + } + server := grpc.NewServer(opts...) + s.server = server + + if ids != nil { + csi.RegisterIdentityServer(server, ids) + } + if cs != nil { + csi.RegisterControllerServer(server, cs) + } + if ns != nil { + csi.RegisterNodeServer(server, ns) + } + + glog.Infof("Listening for connections on address: %#v", listener.Addr()) + + server.Serve(listener) +} + +func ParseEndpoint(ep string) (string, string, error) { + if strings.HasPrefix(strings.ToLower(ep), "unix://") || strings.HasPrefix(strings.ToLower(ep), "tcp://") { + s := strings.SplitN(ep, "://", 2) + if s[1] != "" { + return s[0], s[1], nil + } + } + return "", "", fmt.Errorf("Invalid endpoint: %v", ep) +} + +func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + glog.V(3).Infof("GRPC call: %s", info.FullMethod) + glog.V(5).Infof("GRPC request: %s", protosanitizer.StripSecrets(req)) + resp, err := handler(ctx, req) + if err != nil { + glog.Errorf("GRPC error: %v", err) + } else { + glog.V(5).Infof("GRPC response: %s", protosanitizer.StripSecrets(resp)) + } + return resp, err +} diff --git a/csi/src/identityserver.go b/csi/src/identityserver.go new file mode 100644 index 000000000..b10504119 --- /dev/null +++ b/csi/src/identityserver.go @@ -0,0 +1,60 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package vitastor + +import ( + "context" + + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + "k8s.io/klog" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +// IdentityServer struct of Vitastor CSI driver with supported methods of CSI identity server spec. 
+type IdentityServer struct +{ + *Driver +} + +// NewIdentityServer create new instance identity +func NewIdentityServer(driver *Driver) *IdentityServer +{ + return &IdentityServer{ + Driver: driver, + } +} + +// GetPluginInfo returns metadata of the plugin +func (is *IdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) +{ + klog.Infof("received identity plugin info request %+v", protosanitizer.StripSecrets(req)) + return &csi.GetPluginInfoResponse{ + Name: vitastorCSIDriverName, + VendorVersion: vitastorCSIDriverVersion, + }, nil +} + +// GetPluginCapabilities returns available capabilities of the plugin +func (is *IdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) +{ + klog.Infof("received identity plugin capabilities request %+v", protosanitizer.StripSecrets(req)) + return &csi.GetPluginCapabilitiesResponse{ + Capabilities: []*csi.PluginCapability{ + { + Type: &csi.PluginCapability_Service_{ + Service: &csi.PluginCapability_Service{ + Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, nil +} + +// Probe returns the health and readiness of the plugin +func (is *IdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) +{ + return &csi.ProbeResponse{}, nil +} diff --git a/csi/src/nodeserver.go b/csi/src/nodeserver.go new file mode 100644 index 000000000..01fa526ce --- /dev/null +++ b/csi/src/nodeserver.go @@ -0,0 +1,279 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package vitastor + +import ( + "context" + "os" + "os/exec" + "encoding/json" + "strings" + "bytes" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/utils/mount" + utilexec "k8s.io/utils/exec" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + "k8s.io/klog" +) + +// NodeServer struct of Vitastor CSI driver with supported methods of CSI node server spec. +type NodeServer struct +{ + *Driver + mounter mount.Interface +} + +// NewNodeServer create new instance node +func NewNodeServer(driver *Driver) *NodeServer +{ + return &NodeServer{ + Driver: driver, + mounter: mount.New(""), + } +} + +// NodeStageVolume mounts the volume to a staging path on the node. 
+func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) +{ + return &csi.NodeStageVolumeResponse{}, nil +} + +// NodeUnstageVolume unstages the volume from the staging path +func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) +{ + return &csi.NodeUnstageVolumeResponse{}, nil +} + +func Contains(list []string, s string) bool +{ + for i := 0; i < len(list); i++ + { + if (list[i] == s) + { + return true + } + } + return false +} + +// NodePublishVolume mounts the volume mounted to the staging path to the target path +func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) +{ + klog.Infof("received node publish volume request %+v", protosanitizer.StripSecrets(req)) + + targetPath := req.GetTargetPath() + + // Check that it's not already mounted + free, error := mount.IsNotMountPoint(ns.mounter, targetPath) + if (error != nil) + { + if (os.IsNotExist(error)) + { + error := os.MkdirAll(targetPath, 0777) + if (error != nil) + { + return nil, status.Error(codes.Internal, error.Error()) + } + free = true + } + else + { + return nil, status.Error(codes.Internal, error.Error()) + } + } + if (!free) + { + return &csi.NodePublishVolumeResponse{}, nil + } + + ctxVars := make(map[string]string) + err := json.Unmarshal([]byte(req.VolumeId), &ctxVars) + if (err != nil) + { + return nil, status.Error(codes.Internal, "volume ID not in JSON format") + } + volName := ctxVars["name"] + + _, etcdUrl, etcdPrefix := GetConnectionParams(ctxVars) + if (len(etcdUrl) == 0) + { + return nil, status.Error(codes.InvalidArgument, "no etcdUrl in storage class configuration and no etcd_address in vitastor.conf") + } + + // Map NBD device + // FIXME: Check if already mapped + args := []string{ + "map", "--etcd_address", strings.Join(etcdUrl, ","), + "--etcd_prefix", etcdPrefix, + "--image", volName, + }; + if (ctxVars["configPath"] != "") + { + args = append(args, "--config_path", ctxVars["configPath"]) + } + if (req.GetReadonly()) + { + args = append(args, "--readonly", "1") + } + c := exec.Command("/usr/bin/vitastor-nbd", args...) 
+ var stdout, stderr bytes.Buffer + c.Stdout, c.Stderr = &stdout, &stderr + err = c.Run() + stdoutStr, stderrStr := string(stdout.Bytes()), string(stderr.Bytes()) + if (err != nil) + { + klog.Errorf("vitastor-nbd map failed: %s, status %s\n", stdoutStr+stderrStr, err) + return nil, status.Error(codes.Internal, stdoutStr+stderrStr+" (status "+err.Error()+")") + } + devicePath := strings.TrimSpace(stdoutStr) + + // Check existing format + diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()} + existingFormat, err := diskMounter.GetDiskFormat(devicePath) + if (err != nil) + { + klog.Errorf("failed to get disk format for path %s, error: %v", devicePath, err) + // unmap NBD device + unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput() + if (unmapErr != nil) + { + klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr) + } + return nil, err + } + + // Format the device (ext4 or xfs) + fsType := req.GetVolumeCapability().GetMount().GetFsType() + isBlock := req.GetVolumeCapability().GetBlock() != nil + opt := req.GetVolumeCapability().GetMount().GetMountFlags() + opt = append(opt, "_netdev") + if ((req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY || + req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY) && + !Contains(opt, "ro")) + { + opt = append(opt, "ro") + } + if (fsType == "xfs") + { + opt = append(opt, "nouuid") + } + readOnly := Contains(opt, "ro") + if (existingFormat == "" && !readOnly) + { + args := []string{} + switch fsType + { + case "ext4": + args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath} + case "xfs": + args = []string{"-K", devicePath} + } + if (len(args) > 0) + { + cmdOut, cmdErr := diskMounter.Exec.Command("mkfs."+fsType, args...).CombinedOutput() + if (cmdErr != nil) + { + klog.Errorf("failed to run mkfs, error: %v, output: %v", cmdErr, string(cmdOut)) + // unmap NBD device + unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput() + if (unmapErr != nil) + { + klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr) + } + return nil, status.Error(codes.Internal, cmdErr.Error()) + } + } + } + if (isBlock) + { + opt = append(opt, "bind") + err = diskMounter.Mount(devicePath, targetPath, fsType, opt) + } + else + { + err = diskMounter.FormatAndMount(devicePath, targetPath, fsType, opt) + } + if (err != nil) + { + klog.Errorf( + "failed to mount device path (%s) to path (%s) for volume (%s), error: %s", + devicePath, targetPath, volName, err, + ) + // unmap NBD device + unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput() + if (unmapErr != nil) + { + klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr) + } + return nil, status.Error(codes.Internal, err.Error()) + } + return &csi.NodePublishVolumeResponse{}, nil +} + +// NodeUnpublishVolume unmounts the volume from the target path +func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) +{ + klog.Infof("received node unpublish volume request %+v", protosanitizer.StripSecrets(req)) + targetPath := req.GetTargetPath() + devicePath, refCount, err := mount.GetDeviceNameFromMount(ns.mounter, targetPath) + if (err != nil) + { + if (os.IsNotExist(err)) + { + return nil, 
status.Error(codes.NotFound, "Target path not found") + } + return nil, status.Error(codes.Internal, err.Error()) + } + if (devicePath == "") + { + return nil, status.Error(codes.NotFound, "Volume not mounted") + } + // unmount + err = mount.CleanupMountPoint(targetPath, ns.mounter, false) + if (err != nil) + { + return nil, status.Error(codes.Internal, err.Error()) + } + // unmap NBD device + if (refCount == 1) + { + unmapOut, unmapErr := exec.Command("/usr/bin/vitastor-nbd", "unmap", devicePath).CombinedOutput() + if (unmapErr != nil) + { + klog.Errorf("failed to unmap NBD device %s: %s, error: %v", devicePath, unmapOut, unmapErr) + } + } + return &csi.NodeUnpublishVolumeResponse{}, nil +} + +// NodeGetVolumeStats returns volume capacity statistics available for the volume +func (ns *NodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// NodeExpandVolume expanding the file system on the node +func (ns *NodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) +{ + return nil, status.Error(codes.Unimplemented, "") +} + +// NodeGetCapabilities returns the supported capabilities of the node server +func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) +{ + return &csi.NodeGetCapabilitiesResponse{}, nil +} + +// NodeGetInfo returns NodeGetInfoResponse for CO. +func (ns *NodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) +{ + klog.Infof("received node get info request %+v", protosanitizer.StripSecrets(req)) + return &csi.NodeGetInfoResponse{ + NodeId: ns.NodeID, + }, nil +} diff --git a/csi/src/server.go b/csi/src/server.go new file mode 100644 index 000000000..b097184e6 --- /dev/null +++ b/csi/src/server.go @@ -0,0 +1,36 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package vitastor + +import ( + "k8s.io/klog" +) + +type Driver struct +{ + *Config +} + +// NewDriver create new instance driver +func NewDriver(config *Config) (*Driver, error) +{ + if (config == nil) + { + klog.Errorf("Vitastor CSI driver initialization failed") + return nil, nil + } + driver := &Driver{ + Config: config, + } + klog.Infof("Vitastor CSI driver initialized") + return driver, nil +} + +// Start server +func (driver *Driver) Run() +{ + server := NewNonBlockingGRPCServer() + server.Start(driver.Endpoint, NewIdentityServer(driver), NewControllerServer(driver), NewNodeServer(driver)) + server.Wait() +} diff --git a/csi/vitastor-csi.go b/csi/vitastor-csi.go new file mode 100644 index 000000000..a160ac198 --- /dev/null +++ b/csi/vitastor-csi.go @@ -0,0 +1,39 @@ +// Copyright (c) Vitaliy Filippov, 2019+ +// License: VNPL-1.1 or GNU GPL-2.0+ (see README.md for details) + +package main + +import ( + "flag" + "fmt" + "os" + "k8s.io/klog" + "vitastor.io/csi/src" +) + +func main() +{ + var config = vitastor.NewConfig() + flag.StringVar(&config.Endpoint, "endpoint", "", "CSI endpoint") + flag.StringVar(&config.NodeID, "node", "", "Node ID") + flag.Parse() + if (config.Endpoint == "") + { + config.Endpoint = os.Getenv("CSI_ENDPOINT") + } + if (config.NodeID == "") + { + config.NodeID = os.Getenv("NODE_ID") + } + if (config.Endpoint == "" && config.NodeID == "") + { + fmt.Fprintf(os.Stderr, "Please set -endpoint and 
-node / CSI_ENDPOINT & NODE_ID env vars\n") + os.Exit(1) + } + drv, err := vitastor.NewDriver(config) + if (err != nil) + { + klog.Fatalln(err) + } + drv.Run() +}
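
Note for reviewers on the metadata layout used by the controller above: CreateVolume rounds the requested size up to whole MiBs (1 GiB by default) and stores all volume metadata directly in etcd. <etcd_prefix>/index/image/<name> maps an image name to its pool and inode ID, <etcd_prefix>/config/inode/<pool>/<id> holds the inode configuration, and <etcd_prefix>/index/maxid/<pool> tracks the last allocated ID in each pool. Because Kubernetes does not pass VolumeContext to DeleteVolume, the CSI VolumeId returned by CreateVolume is itself a small JSON object carrying the image name plus any non-default connection options. The following is a minimal, illustrative sketch (not part of the patch) of how an external tool could resolve such a VolumeId back to its inode configuration; the resolveVolume helper and the example volume name are hypothetical, and error handling is kept to a minimum.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"go.etcd.io/etcd/clientv3"
)

// Same JSON shapes as the driver's InodeIndex and InodeConfig types.
type inodeIndex struct {
	Id     uint64 `json:"id"`
	PoolId uint64 `json:"pool_id"`
}

type inodeConfig struct {
	Name string `json:"name"`
	Size uint64 `json:"size,omitempty"`
}

// resolveVolume is a hypothetical helper, not part of the driver: it decodes
// the JSON VolumeId produced by CreateVolume and reads the image index and
// inode configuration from etcd using the same key layout as the driver.
func resolveVolume(volumeId string, defaultEtcdUrl []string, etcdPrefix string) (*inodeConfig, error) {
	ctxVars := make(map[string]string)
	if err := json.Unmarshal([]byte(volumeId), &ctxVars); err != nil {
		return nil, fmt.Errorf("volume ID is not JSON: %w", err)
	}
	etcdUrl := defaultEtcdUrl
	if ctxVars["etcdUrl"] != "" {
		// The driver only stores etcdUrl in the VolumeId when it differs from the default
		etcdUrl = strings.Split(ctxVars["etcdUrl"], ",")
	}
	cli, err := clientv3.New(clientv3.Config{Endpoints: etcdUrl, DialTimeout: 15 * time.Second})
	if err != nil {
		return nil, err
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// <etcd_prefix>/index/image/<name> -> pool ID + inode ID
	resp, err := cli.Get(ctx, etcdPrefix+"/index/image/"+ctxVars["name"])
	if err != nil || len(resp.Kvs) == 0 {
		return nil, fmt.Errorf("image %s not found: %v", ctxVars["name"], err)
	}
	var idx inodeIndex
	if err := json.Unmarshal(resp.Kvs[0].Value, &idx); err != nil {
		return nil, err
	}

	// <etcd_prefix>/config/inode/<pool>/<id> -> image name, size, parent, readonly flag
	resp, err = cli.Get(ctx, fmt.Sprintf("%s/config/inode/%d/%d", etcdPrefix, idx.PoolId, idx.Id))
	if err != nil || len(resp.Kvs) == 0 {
		return nil, fmt.Errorf("inode %d/%d not found: %v", idx.PoolId, idx.Id, err)
	}
	var cfg inodeConfig
	if err := json.Unmarshal(resp.Kvs[0].Value, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}

func main() {
	// Example VolumeId as returned by CreateVolume; the image name is a placeholder
	volumeId := `{"name":"pvc-example"}`
	cfg, err := resolveVolume(volumeId, []string{"http://192.168.7.2:2379"}, "/vitastor")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("image %s, size %d bytes\n", cfg.Name, cfg.Size)
}

DeleteVolume works against the same pool/inode pair: it passes them to vitastor-rm to drop the data and then removes both etcd keys in a single transaction.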