diff --git a/.drone.yml b/.drone.yml
index c9159588ad17516b919003c8ec3f476b2817c85c..0b5bbf97d47ead90f3bf2048e32e341ce9791845 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -31,13 +31,26 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
 - name: docker-publish
+  image: plugins/docker
+  settings:
+    dockerfile: package/Dockerfile
+    password:
+      from_secret: docker_password
+    repo: "rancher/local-path-provisioner"
+    tag: "${DRONE_BRANCH}-head-amd64"
+    username:
+      from_secret: docker_username
+  when:
+    instance:
+    - drone-publish.rancher.io
+    event:
+    - push
+
+- name: docker-tagged-publish
   image: plugins/docker
   settings:
     dockerfile: package/Dockerfile
@@ -50,9 +63,6 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
@@ -94,13 +104,26 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
 - name: docker-publish
+  image: plugins/docker
+  settings:
+    dockerfile: package/Dockerfile
+    password:
+      from_secret: docker_password
+    repo: "rancher/local-path-provisioner"
+    tag: "${DRONE_BRANCH}-head-arm64"
+    username:
+      from_secret: docker_username
+  when:
+    instance:
+    - drone-publish.rancher.io
+    event:
+    - push
+
+- name: docker-tagged-publish
   image: plugins/docker
   settings:
     dockerfile: package/Dockerfile
@@ -113,9 +136,6 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
@@ -156,13 +176,26 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
 - name: docker-publish
+  image: plugins/docker
+  settings:
+    dockerfile: package/Dockerfile
+    password:
+      from_secret: docker_password
+    repo: "rancher/local-path-provisioner"
+    tag: "${DRONE_BRANCH}-head-arm"
+    username:
+      from_secret: docker_username
+  when:
+    instance:
+    - drone-publish.rancher.io
+    event:
+    - push
+
+- name: docker-tagged-publish
   image: plugins/docker
   settings:
     dockerfile: package/Dockerfile
@@ -175,9 +208,6 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
@@ -222,13 +252,29 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
 - name: docker-publish
+  image: rancher/drone-images:docker-s390x
+  volumes:
+    - name: docker
+      path: /var/run/docker.sock
+  settings:
+    dockerfile: package/Dockerfile
+    password:
+      from_secret: docker_password
+    repo: "rancher/local-path-provisioner"
+    tag: "${DRONE_BRANCH}-head-s390x"
+    username:
+      from_secret: docker_username
+  when:
+    instance:
+    - drone-publish.rancher.io
+    event:
+    - push
+
+- name: docker-tagged-publish
   image: rancher/drone-images:docker-s390x
   volumes:
     - name: docker
@@ -244,9 +290,6 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
@@ -265,6 +308,26 @@ platform:
 
 steps:
 - name: manifest
+  image: plugins/manifest:1.0.2
+  settings:
+    username:
+      from_secret: docker_username
+    password:
+      from_secret: docker_password
+    platforms:
+      - linux/amd64
+      - linux/arm64
+      - linux/arm
+      - linux/s390x
+    target: "rancher/local-path-provisioner:${DRONE_BRANCH}-head"
+    template: "rancher/local-path-provisioner:${DRONE_BRANCH}-head-ARCH"
+  when:
+    instance:
+    - drone-publish.rancher.io
+    event:
+    - push
+
+- name: manifest-tag
   image: plugins/manifest:1.0.2
   settings:
     username:
@@ -281,9 +344,6 @@ steps:
   when:
     instance:
     - drone-publish.rancher.io
-    ref:
-    - refs/head/master
-    - refs/tags/*
     event:
     - tag
 
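Taken together, the new push-event steps and the manifest step publish per-branch "head" images. On a push to `master`, for instance, the settings above would produce:

```
rancher/local-path-provisioner:master-head-amd64
rancher/local-path-provisioner:master-head-arm64
rancher/local-path-provisioner:master-head-arm
rancher/local-path-provisioner:master-head-s390x
rancher/local-path-provisioner:master-head        # multi-arch manifest
```
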
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5a360b2ca3f6d96853c5f04738546e63d1492674..e4a1fdbc9156e43eb558ea5bb61e383d1e80c96c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -4,7 +4,7 @@ include:
 
 variables:
   CHART_NAME: local-path-provisioner
-  CHART_DIR: deploy/chart/
+  CHART_DIR: deploy/chart/local-path-provisioner/
   KANIKO_BUILD_IMAGENAME: local-path-provisioner
 
 stages:
diff --git a/README.md b/README.md
index dc3a06e4cd5d191395390d440ac3c3ed3008372e..cfc30af4c6ebe5c18c8fd4eb99d2a886e180ff3e 100644
--- a/README.md
+++ b/README.md
@@ -24,11 +24,23 @@ Kubernetes v1.12+.
 
 In this setup, the directory `/opt/local-path-provisioner` will be used across all the nodes as the path for provisioning (a.k.a, store the persistent volume data). The provisioner will be installed in `local-path-storage` namespace by default.
 
+- Stable
+```
+kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml
+```
+
+- Development
 ```
 kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
 ```
 
 Or, use `kustomize` to deploy.
+- Stable
+```
+kustomize build "github.com/rancher/local-path-provisioner/deploy?ref=v0.0.22" | kubectl apply -f -
+```
+
+- Development
 ```
 kustomize build "github.com/rancher/local-path-provisioner/deploy?ref=master" | kubectl apply -f -
 ```
@@ -119,7 +131,7 @@ Now you've verified that the provisioner works as expected.
 
 ### Customize the ConfigMap
 
-The configuration of the provisioner is a json file `config.json` and two bash scripts `setup` and `teardown`, stored in the a config map, e.g.:
+The configuration of the provisioner is a JSON file `config.json`, a Pod template `helperPod.yaml`, and two shell scripts `setup` and `teardown`, stored in a ConfigMap, e.g.:
 ```
 kind: ConfigMap
 apiVersion: v1
@@ -146,40 +158,12 @@ data:
         }
   setup: |-
         #!/bin/sh
-        while getopts "m:s:p:" opt
-        do
-            case $opt in
-                p)
-                absolutePath=$OPTARG
-                ;;
-                s)
-                sizeInBytes=$OPTARG
-                ;;
-                m)
-                volMode=$OPTARG
-                ;;
-            esac
-        done
-
-        mkdir -m 0777 -p ${absolutePath}
+        set -eu
+        mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
         #!/bin/sh
-        while getopts "m:s:p:" opt
-        do
-            case $opt in
-                p)
-                absolutePath=$OPTARG
-                ;;
-                s)
-                sizeInBytes=$OPTARG
-                ;;
-                m)
-                volMode=$OPTARG
-                ;;
-            esac
-        done
-
-        rm -rf ${absolutePath}
+        set -eu
+        rm -rf "$VOL_DIR"
   helperPod.yaml: |-
         apiVersion: v1
         kind: Pod
@@ -209,16 +193,19 @@ The configuration must obey following rules:
 3. No duplicate paths allowed for one node.
 4. No duplicate node allowed.
 
-#### Scripts `setup` and `teardown` and `helperPod.yaml`
+#### The `setup` and `teardown` scripts and the `helperPod.yaml` template
 
-The script `setup` will be executed before the volume is created, to prepare the directory on the node for the volume.
+* The `setup` script is run before the volume is created, to prepare the volume directory on the node.
+* The `teardown` script is run after the volume is deleted, to clean up the volume directory on the node.
+* The `helperPod.yaml` template is used to create a helper Pod that runs the `setup` or `teardown` script.
 
-The script `teardown` will be executed after the volume is deleted, to cleanup the directory on the node for the volume.
+The scripts receive their input as environment variables:
 
-The yaml file `helperPod.yaml` will be created by local-path-storage to execute `setup` or `teardown` script with three paramemters  `-p <path> -s <size> -m <mode>` :
-* path: the absolute path provisioned on the node
-- size: pvc.Spec.resources.requests.storage in bytes
-* mode: pvc.Spec.VolumeMode
+| Environment variable | Description |
+| -------------------- | ----------- |
+| `VOL_DIR` | Volume directory that should be created or removed. |
+| `VOL_MODE` | The PersistentVolume mode (`Block` or `Filesystem`). |
+| `VOL_SIZE_BYTES` | Requested volume size in bytes. |
 
 #### Reloading
 
@@ -242,6 +229,12 @@ Before uninstallation, make sure the PVs created by the provisioner have already
 
 To uninstall, execute:
 
+- Stable
+```
+kubectl delete -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml
+```
+
+- Development
 ```
 kubectl delete -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
 ```
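
Given the variables in the table above, a minimal custom `setup` script could look like the following sketch (hypothetical; the stock scripts in this change only create and remove the directory):

```sh
#!/bin/sh
# Hypothetical custom setup script: the provisioner exports VOL_DIR,
# VOL_MODE and VOL_SIZE_BYTES into the helper pod's environment.
set -eu
echo "provisioning ${VOL_SIZE_BYTES} bytes (${VOL_MODE} volume) at ${VOL_DIR}"
mkdir -m 0777 -p "$VOL_DIR"
```
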
diff --git a/deploy/chart/.helmignore b/deploy/chart/local-path-provisioner/.helmignore
similarity index 100%
rename from deploy/chart/.helmignore
rename to deploy/chart/local-path-provisioner/.helmignore
diff --git a/deploy/chart/Chart.yaml b/deploy/chart/local-path-provisioner/Chart.yaml
similarity index 88%
rename from deploy/chart/Chart.yaml
rename to deploy/chart/local-path-provisioner/Chart.yaml
index 80c946762b9cd3a18ff6a1cc1b1b892bf1f3423a..db136e910f34e6838705b4e53e14e3bf859a4ea5 100644
--- a/deploy/chart/Chart.yaml
+++ b/deploy/chart/local-path-provisioner/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v1
 description: Use HostPath for persistent local storage with Kubernetes
 name: local-path-provisioner
-version: 0.0.21
-appVersion: "v0.0.21"
+version: 0.0.22
+appVersion: "v0.0.22"
 keywords:
   - storage
   - hostpath
@@ -10,4 +10,3 @@ kubeVersion: ">=1.12.0-r0"
 home: https://github.com/rancher/local-path-provisioner
 sources:
   - https://github.com/rancher/local-path-provisioner.git
-
diff --git a/deploy/chart/README.md b/deploy/chart/local-path-provisioner/README.md
similarity index 99%
rename from deploy/chart/README.md
rename to deploy/chart/local-path-provisioner/README.md
index 07f52dbc5f79106a069d58e15fbcdb6ab5a20f1b..dde34f15c264a5a50531098d3746835632d6872b 100644
--- a/deploy/chart/README.md
+++ b/deploy/chart/local-path-provisioner/README.md
@@ -56,7 +56,7 @@ default values.
 | Parameter                           | Description                                                                     | Default                                                                             |
 | ----------------------------------- | ------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
 | `image.repository`                  | Local Path Provisioner image name                                               | `rancher/local-path-provisioner`                                                    |
-| `image.tag`                         | Local Path Provisioner image tag                                                | `v0.0.21`                                                                            |
+| `image.tag`                         | Local Path Provisioner image tag                                                | `v0.0.22`                                                                       |
 | `image.pullPolicy`                  | Image pull policy                                                               | `IfNotPresent`                                                                      |
 | `storageClass.create`               | If true, create a `StorageClass`                                                | `true`                                                                              |
 | `storageClass.provisionerName`      | The provisioner name for the storage class                                      | `nil`                                                                               |
diff --git a/deploy/chart/templates/NOTES.txt b/deploy/chart/local-path-provisioner/templates/NOTES.txt
similarity index 100%
rename from deploy/chart/templates/NOTES.txt
rename to deploy/chart/local-path-provisioner/templates/NOTES.txt
diff --git a/deploy/chart/templates/_helpers.tpl b/deploy/chart/local-path-provisioner/templates/_helpers.tpl
similarity index 100%
rename from deploy/chart/templates/_helpers.tpl
rename to deploy/chart/local-path-provisioner/templates/_helpers.tpl
diff --git a/deploy/chart/templates/clusterrole.yaml b/deploy/chart/local-path-provisioner/templates/clusterrole.yaml
similarity index 100%
rename from deploy/chart/templates/clusterrole.yaml
rename to deploy/chart/local-path-provisioner/templates/clusterrole.yaml
diff --git a/deploy/chart/templates/clusterrolebinding.yaml b/deploy/chart/local-path-provisioner/templates/clusterrolebinding.yaml
similarity index 100%
rename from deploy/chart/templates/clusterrolebinding.yaml
rename to deploy/chart/local-path-provisioner/templates/clusterrolebinding.yaml
diff --git a/deploy/chart/templates/configmap.yaml b/deploy/chart/local-path-provisioner/templates/configmap.yaml
similarity index 100%
rename from deploy/chart/templates/configmap.yaml
rename to deploy/chart/local-path-provisioner/templates/configmap.yaml
diff --git a/deploy/chart/templates/deployment.yaml b/deploy/chart/local-path-provisioner/templates/deployment.yaml
similarity index 100%
rename from deploy/chart/templates/deployment.yaml
rename to deploy/chart/local-path-provisioner/templates/deployment.yaml
diff --git a/deploy/chart/templates/registry-secret.yaml b/deploy/chart/local-path-provisioner/templates/registry-secret.yaml
similarity index 100%
rename from deploy/chart/templates/registry-secret.yaml
rename to deploy/chart/local-path-provisioner/templates/registry-secret.yaml
diff --git a/deploy/chart/templates/serviceaccount.yaml b/deploy/chart/local-path-provisioner/templates/serviceaccount.yaml
similarity index 100%
rename from deploy/chart/templates/serviceaccount.yaml
rename to deploy/chart/local-path-provisioner/templates/serviceaccount.yaml
diff --git a/deploy/chart/templates/storageclass.yaml b/deploy/chart/local-path-provisioner/templates/storageclass.yaml
similarity index 100%
rename from deploy/chart/templates/storageclass.yaml
rename to deploy/chart/local-path-provisioner/templates/storageclass.yaml
diff --git a/deploy/chart/values.yaml b/deploy/chart/local-path-provisioner/values.yaml
similarity index 84%
rename from deploy/chart/values.yaml
rename to deploy/chart/local-path-provisioner/values.yaml
index c43a7444bea23cda2b856812aa3e581de80adb37..ce0ac0d9b24715315e5f2304e637c0b9a3c89055 100644
--- a/deploy/chart/values.yaml
+++ b/deploy/chart/local-path-provisioner/values.yaml
@@ -4,7 +4,7 @@ replicaCount: 1
 
 image:
   repository: rancher/local-path-provisioner
-  tag: v0.0.21
+  tag: v0.0.22
   pullPolicy: IfNotPresent
 
 helperImage:
@@ -93,40 +93,12 @@ configmap:
   # specify the custom script for setup and teardown
   setup: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    mkdir -m 0777 -p ${absolutePath}
+    set -eu
+    mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    rm -rf ${absolutePath}
+    set -eu
+    rm -rf "$VOL_DIR"
   # specify the custom helper pod yaml
   helperPod: |-
     apiVersion: v1
diff --git a/deploy/example-config.yaml b/deploy/example-config.yaml
index bfca41b77e8bdca1a3fcdf269f59e60c84a1d6f7..bf3617884a5e931ae18a52eafc9dd8409337a08c 100644
--- a/deploy/example-config.yaml
+++ b/deploy/example-config.yaml
@@ -23,40 +23,12 @@ data:
     }
   setup: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    mkdir -m 0777 -p ${absolutePath}
+    set -eu
+    mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    rm -rf ${absolutePath}
+    set -eu
+    rm -rf "$VOL_DIR"
   helperPod.yaml: |-
     apiVersion: v1
     kind: Pod
diff --git a/deploy/local-path-storage.yaml b/deploy/local-path-storage.yaml
index 32db7e10ef3f4bdd823267e2370e36f322b15abb..a5806a1358919f4de4d0175ce6447c9d5a1e62f5 100644
--- a/deploy/local-path-storage.yaml
+++ b/deploy/local-path-storage.yaml
@@ -62,7 +62,7 @@ spec:
       serviceAccountName: local-path-provisioner-service-account
       containers:
         - name: local-path-provisioner
-          image: rancher/local-path-provisioner:v0.0.21
+          image: rancher/local-path-provisioner:v0.0.22
           imagePullPolicy: IfNotPresent
           command:
             - local-path-provisioner
@@ -110,40 +110,12 @@ data:
     }
   setup: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    mkdir -m 0777 -p ${absolutePath}
+    set -eu
+    mkdir -m 0777 -p "$VOL_DIR"
   teardown: |-
     #!/bin/sh
-    while getopts "m:s:p:" opt
-    do
-        case $opt in
-            p)
-            absolutePath=$OPTARG
-            ;;
-            s)
-            sizeInBytes=$OPTARG
-            ;;
-            m)
-            volMode=$OPTARG
-            ;;
-        esac
-    done
-
-    rm -rf ${absolutePath}
+    set -eu
+    rm -rf "$VOL_DIR"
   helperPod.yaml: |-
     apiVersion: v1
     kind: Pod
diff --git a/deploy/provisioner.yaml b/deploy/provisioner.yaml
index 17aa6f92998127f27d18a410c3268372dd2d318c..2fdf77b96be8dc98ad3b80df33faf4f1e387b280 100644
--- a/deploy/provisioner.yaml
+++ b/deploy/provisioner.yaml
@@ -16,7 +16,7 @@ spec:
       serviceAccountName: local-path-provisioner-service-account
       containers:
         - name: local-path-provisioner
-          image: rancher/local-path-provisioner:v0.0.21
+          image: rancher/local-path-provisioner:v0.0.22
           imagePullPolicy: Always
           command:
             - local-path-provisioner
diff --git a/examples/quota/README.md b/examples/quota/README.md
index 49d61569ddf0653849933cc9418190908a2a0161..0f3c92cb6dbc2f9d45e164f587c8be81214c600a 100644
--- a/examples/quota/README.md
+++ b/examples/quota/README.md
@@ -3,7 +3,7 @@ this is an example to enable quota for xfs
 
 # Usage
 > 1. build a helper image using the sample dockerfile to replace helper image xxx/storage-xfs-quota:v0.1 at configmap(helperPod.yaml) of debug.yaml.
-> 2. use the sample setup and teardown script at configmap of debug.yaml
+> 2. use the sample `setup` and `teardown` scripts included in the kustomization.
 
 Notice:
 > 1. make sure the path at nodePathMap is the mountpoint of xfs which enables pquota
@@ -13,6 +13,7 @@ Notice:
 > git clone https://github.com/rancher/local-path-provisioner.git
 > cd local-path-provisioner
 > go build
-> kubectl apply -f debug.yaml
+> kubectl apply -k examples/quota
+> kubectl delete -n local-path-storage deployment local-path-provisioner
 > ./local-path-provisioner --debug start --namespace=local-path-storage
 ```
diff --git a/examples/quota/helper-pod.yaml b/examples/quota/helperPod.yaml
similarity index 100%
rename from examples/quota/helper-pod.yaml
rename to examples/quota/helperPod.yaml
diff --git a/examples/quota/kustomization.yaml b/examples/quota/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..17af701a6db0d2b6018b9bfccd079e1de289eb31
--- /dev/null
+++ b/examples/quota/kustomization.yaml
@@ -0,0 +1,17 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../../deploy
+
+configMapGenerator:
+- name: local-path-config
+  namespace: local-path-storage
+  behavior: merge
+  files:
+  - helperPod.yaml
+  - setup
+  - teardown
+
+generatorOptions:
+  disableNameSuffixHash: true
diff --git a/examples/quota/local-path-storage.yaml b/examples/quota/local-path-storage.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ccb5ed62412f13c3cb027ddc1145a27c2c119fab
--- /dev/null
+++ b/examples/quota/local-path-storage.yaml
@@ -0,0 +1,274 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: local-path-storage
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: local-path-provisioner-role
+rules:
+  - apiGroups: [ "" ]
+    resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
+    verbs: [ "get", "list", "watch" ]
+  - apiGroups: [ "" ]
+    resources: [ "endpoints", "persistentvolumes", "pods" ]
+    verbs: [ "*" ]
+  - apiGroups: [ "" ]
+    resources: [ "events" ]
+    verbs: [ "create", "patch" ]
+  - apiGroups: [ "storage.k8s.io" ]
+    resources: [ "storageclasses" ]
+    verbs: [ "get", "list", "watch" ]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: local-path-provisioner-bind
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: local-path-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: local-path-provisioner-service-account
+    namespace: local-path-storage
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: local-path-provisioner
+  namespace: local-path-storage
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: local-path-provisioner
+  template:
+    metadata:
+      labels:
+        app: local-path-provisioner
+    spec:
+      serviceAccountName: local-path-provisioner-service-account
+      containers:
+        - name: local-path-provisioner
+          image: rancher/local-path-provisioner:v0.0.22
+          imagePullPolicy: IfNotPresent
+          command:
+            - local-path-provisioner
+            - --debug
+            - start
+            - --config
+            - /etc/config/config.json
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config/
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+      volumes:
+        - name: config-volume
+          configMap:
+            name: local-path-config
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-path
+provisioner: rancher.io/local-path
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: local-path-config
+  namespace: local-path-storage
+data:
+  config.json: |-
+    {
+            "nodePathMap":[
+            {
+                    "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+                    "paths":["/opt/local-path-provisioner"]
+            }
+            ]
+    }
+  setup: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+        case $opt in
+            p)
+            absolutePath=$OPTARG
+            ;;
+            s)
+            sizeInBytes=$OPTARG
+            ;;
+            m)
+            volMode=$OPTARG
+            ;;
+        esac
+    done
+
+    mkdir -m 0777 -p ${absolutePath}
+  teardown: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+        case $opt in
+            p)
+            absolutePath=$OPTARG
+            ;;
+            s)
+            sizeInBytes=$OPTARG
+            ;;
+            m)
+            volMode=$OPTARG
+            ;;
+        esac
+    done
+
+    rm -rf ${absolutePath}
+  helperPod.yaml: |-
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: helper-pod
+    spec:
+      containers:
+      - name: helper-pod
+        image: busybox
+        imagePullPolicy: IfNotPresent
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: local-path-config
+  namespace: local-path-storage
+data:
+  config.json: |-
+    {
+            "nodePathMap":[
+            {
+                    "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+                    "paths":["/mnt/disk"]
+            }
+            ]
+    }
+  setup: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+        case $opt in
+            p)
+            absolutePath=$OPTARG
+            ;;
+            s)
+            sizeInBytes=$OPTARG
+            ;;
+            m)
+            volMode=$OPTARG
+            ;;
+        esac
+    done
+
+    xfsPath=$(dirname "$absolutePath")
+    pvcName=$(basename "$absolutePath")
+    mkdir -p ${absolutePath}
+
+    # support xfs quota
+    type=`stat -f -c %T ${xfsPath}`
+    if [ ${type} == 'xfs' ]; then
+
+        echo "support xfs quota"
+
+        project=`cat /etc/projects | tail -n 1`
+        id=`echo ${project%:*}`
+
+        if [ ! ${project} ]; then
+            id=1
+        else
+            id=$[${id}+1]
+        fi
+
+        echo "${id}:${absolutePath}" >> /etc/projects
+        echo "${pvcName}:${id}" >> /etc/projid
+
+        xfs_quota -x -c "project -s ${pvcName}"
+        xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath}
+        xfs_quota -x -c "report -pbih" ${xfsPath}
+    fi
+
+  teardown: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+        case $opt in
+            p)
+            absolutePath=$OPTARG
+            ;;
+            s)
+            sizeInBytes=$OPTARG
+            ;;
+            m)
+            volMode=$OPTARG
+            ;;
+        esac
+    done
+
+    xfsPath=$(dirname "$absolutePath")
+    pvcName=$(basename "$absolutePath")
+
+    # support xfs quota
+    type=`stat -f -c %T ${xfsPath}`
+    if [ ${type} == 'xfs' ]; then
+
+        echo "support xfs quota"
+        xfs_quota -x -c "limit -p bhard=0 ${pvcName}" ${xfsPath}
+    fi
+
+    rm -rf ${absolutePath}
+    if [ ${type} == 'xfs' ]; then
+        echo "$(sed "/${pvcName}/d" /etc/projects)" >  /etc/projects
+        echo "$(sed "/${pvcName}/d" /etc/projid)" >  /etc/projid
+        xfs_quota -x -c "report -pbih" ${xfsPath}
+    fi
+
+  helperPod.yaml: |-
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: helper-pod
+    spec:
+      containers:
+      - name: helper-pod
+        image: xxx/storage-xfs-quota:v0.1
+        imagePullPolicy: Always
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: xfs-quota-projects
+          subPath: projects
+          mountPath: /etc/projects
+        - name: xfs-quota-projects
+          subPath: projid
+          mountPath: /etc/projid
+      volumes:
+      - name: xfs-quota-projects
+        hostPath:
+          path: /etc
diff --git a/examples/quota/setup b/examples/quota/setup
index 958a308fb0179af2eb4c7bb19186701a325a7192..8abcb458acc3bda4b46b83d422f6a951479a0346 100755
--- a/examples/quota/setup
+++ b/examples/quota/setup
@@ -1,23 +1,9 @@
 #!/bin/sh
 
-while getopts "m:s:p:" opt
-do
-    case $opt in
-        p)
-        absolutePath=$OPTARG
-        ;;
-        s)
-        sizeInBytes=$OPTARG
-        ;;
-        m)
-        volMode=$OPTARG
-        ;;
-    esac
-done
-
-xfsPath=$(dirname "$absolutePath")
-pvcName=$(basename "$absolutePath")
-mkdir -p ${absolutePath}
+xfsPath=$(dirname "$VOL_DIR")
+pvcName=$(basename "$VOL_DIR")
+
+mkdir -p "$VOL_DIR"
 
 # support xfs quota
 type=`stat -f -c %T ${xfsPath}`
@@ -34,10 +20,10 @@ if [ ${type} == 'xfs' ]; then
         id=$[${id}+1]
     fi
 
-    echo "${id}:${absolutePath}" >> /etc/projects
+    echo "${id}:${VOL_DIR}" >> /etc/projects
     echo "${pvcName}:${id}" >> /etc/projid
 
     xfs_quota -x -c "project -s ${pvcName}"
-    xfs_quota -x -c "limit -p bhard=${sizeInBytes} ${pvcName}" ${xfsPath}
+    xfs_quota -x -c "limit -p bhard=${VOL_SIZE_BYTES} ${pvcName}" ${xfsPath}
     xfs_quota -x -c "report -pbih" ${xfsPath}
-fi
\ No newline at end of file
+fi
diff --git a/examples/quota/teardown b/examples/quota/teardown
index 3e9268dbb1b587b25a4108f5e259eefc982d035d..67dbad85026b296639f955ef4e4839ae4d3e31d0 100755
--- a/examples/quota/teardown
+++ b/examples/quota/teardown
@@ -1,22 +1,7 @@
 #!/bin/sh
 
-while getopts "m:s:p:" opt
-do
-    case $opt in
-        p)
-        absolutePath=$OPTARG
-        ;;
-        s)
-        sizeInBytes=$OPTARG
-        ;;
-        m)
-        volMode=$OPTARG
-        ;;
-    esac
-done
-
-xfsPath=$(dirname "$absolutePath")
-pvcName=$(basename "$absolutePath")
+xfsPath=$(dirname "$VOL_DIR")
+pvcName=$(basename "$VOL_DIR")
 
 # support xfs quota
 type=`stat -f -c %T ${xfsPath}`
@@ -26,10 +11,10 @@ if [ ${type} == 'xfs' ]; then
     xfs_quota -x -c "limit -p bhard=0 ${pvcName}" ${xfsPath}
 fi
 
-rm -rf ${absolutePath}
+rm -rf "$VOL_DIR"
+
 if [ ${type} == 'xfs' ]; then
     echo "$(sed "/${pvcName}/d" /etc/projects)" >  /etc/projects
     echo "$(sed "/${pvcName}/d" /etc/projid)" >  /etc/projid
     xfs_quota -x -c "report -pbih" ${xfsPath}
 fi
-
diff --git a/provisioner.go b/provisioner.go
index 20c2d4841ae7fb7e7f06838ea64eb950c379cf2a..114505c791497fedbc069d0b08bf26a6b79a3417 100644
--- a/provisioner.go
+++ b/provisioner.go
@@ -31,11 +31,21 @@ const (
 	KeyNode = "kubernetes.io/hostname"
 
 	NodeDefaultNonListedNodes = "DEFAULT_PATH_FOR_NON_LISTED_NODES"
+
+	helperScriptDir     = "/script"
+	helperDataVolName   = "data"
+	helperScriptVolName = "script"
+
+	envVolDir  = "VOL_DIR"
+	envVolMode = "VOL_MODE"
+	envVolSize = "VOL_SIZE_BYTES"
 )
 
-var (
-	CmdTimeoutCounts = 120
+const (
+	defaultCmdTimeoutSeconds = 120
+)
 
+var (
 	ConfigFileCheckInterval = 30 * time.Second
 
 	HelperPodNameMaxLength = 128
@@ -63,6 +73,8 @@ type NodePathMapData struct {
 
 type ConfigData struct {
 	NodePathMap []*NodePathMapData `json:"nodePathMap,omitempty"`
+
+	CmdTimeoutSeconds int `json:"cmdTimeoutSeconds,omitempty"`
 }
 
 type NodePathMap struct {
@@ -70,7 +82,8 @@ type NodePathMap struct {
 }
 
 type Config struct {
-	NodePathMap map[string]*NodePathMap
+	NodePathMap       map[string]*NodePathMap
+	CmdTimeoutSeconds int
 }
 
 func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset,
@@ -204,13 +217,14 @@ func (p *LocalPathProvisioner) Provision(opts pvController.ProvisionOptions) (*v
 	logrus.Infof("Creating volume %v at %v:%v", name, node.Name, path)
 
 	storage := pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
-	volMode := string(*pvc.Spec.VolumeMode)
-
-	createCmdsForPath := []string{
-		"/bin/sh",
-		"/script/setup",
-	}
-	if err := p.createHelperPod(ActionTypeCreate, createCmdsForPath, name, path, node.Name, volMode, storage.Value()); err != nil {
+	provisionCmd := []string{"/bin/sh", "/script/setup"}
+	if err := p.createHelperPod(ActionTypeCreate, provisionCmd, volumeOptions{
+		Name:        name,
+		Path:        path,
+		Mode:        *pvc.Spec.VolumeMode,
+		SizeInBytes: storage.Value(),
+		Node:        node.Name,
+	}); err != nil {
 		return nil, err
 	}
 
@@ -269,9 +283,14 @@ func (p *LocalPathProvisioner) Delete(pv *v1.PersistentVolume) (err error) {
 	if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {
 		logrus.Infof("Deleting volume %v at %v:%v", pv.Name, node, path)
 		storage := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
-		volMode := string(*pv.Spec.VolumeMode)
-		cleanupCmdsForPath := []string{"/bin/sh", "/script/teardown"}
-		if err := p.createHelperPod(ActionTypeDelete, cleanupCmdsForPath, pv.Name, path, node, volMode, storage.Value()); err != nil {
+		cleanupCmd := []string{"/bin/sh", "/script/teardown"}
+		if err := p.createHelperPod(ActionTypeDelete, cleanupCmd, volumeOptions{
+			Name:        pv.Name,
+			Path:        path,
+			Mode:        *pv.Spec.VolumeMode,
+			SizeInBytes: storage.Value(),
+			Node:        node,
+		}); err != nil {
 			logrus.Infof("clean up volume %v failed: %v", pv.Name, err)
 			return err
 		}
@@ -322,29 +341,30 @@ func (p *LocalPathProvisioner) getPathAndNodeForPV(pv *v1.PersistentVolume) (pat
 	return path, node, nil
 }
 
-func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []string, name, path, node, volumeMode string, sizeInBytes int64) (err error) {
+type volumeOptions struct {
+	Name        string
+	Path        string
+	Mode        v1.PersistentVolumeMode
+	SizeInBytes int64
+	Node        string
+}
+
+func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmd []string, o volumeOptions) (err error) {
 	defer func() {
-		err = errors.Wrapf(err, "failed to %v volume %v", action, name)
+		err = errors.Wrapf(err, "failed to %v volume %v", action, o.Name)
 	}()
-	if name == "" || path == "" || node == "" {
+	if o.Name == "" || o.Path == "" || o.Node == "" {
 		return fmt.Errorf("invalid empty name or path or node")
 	}
-	path, err = filepath.Abs(path)
-	if err != nil {
-		return err
-	}
-	path = strings.TrimSuffix(path, "/")
-	parentDir, volumeDir := filepath.Split(path)
-	parentDir = strings.TrimSuffix(parentDir, "/")
-	volumeDir = strings.TrimSuffix(volumeDir, "/")
-	if parentDir == "" || volumeDir == "" {
-		// it covers the `/` case
-		return fmt.Errorf("invalid path %v for %v: cannot find parent dir or volume dir", action, path)
+	if !filepath.IsAbs(o.Path) {
+		return fmt.Errorf("volume path %s is not absolute", o.Path)
 	}
+	o.Path = filepath.Clean(o.Path)
+	parentDir, volumeDir := filepath.Split(o.Path)
 	hostPathType := v1.HostPathDirectoryOrCreate
 	lpvVolumes := []v1.Volume{
 		{
-			Name: "data",
+			Name: helperDataVolName,
 			VolumeSource: v1.VolumeSource{
 				HostPath: &v1.HostPathVolumeSource{
 					Path: parentDir,
@@ -353,7 +373,7 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
 			},
 		},
 		{
-			Name: "script",
+			Name: helperScriptVolName,
 			VolumeSource: v1.VolumeSource{
 				ConfigMap: &v1.ConfigMapVolumeSource{
 					LocalObjectReference: v1.LocalObjectReference{
@@ -373,18 +393,6 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
 			},
 		},
 	}
-	lpvVolumeMounts := []v1.VolumeMount{
-		{
-			Name:      "data",
-			ReadOnly:  false,
-			MountPath: parentDir,
-		},
-		{
-			Name:      "script",
-			ReadOnly:  false,
-			MountPath: "/script",
-		},
-	}
 	lpvTolerations := []v1.Toleration{
 		{
 			Operator: v1.TolerationOpExists,
@@ -392,23 +400,39 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
 	}
 	helperPod := p.helperPod.DeepCopy()
 
+	scriptMount := addVolumeMount(&helperPod.Spec.Containers[0].VolumeMounts, helperScriptVolName, helperScriptDir)
+	scriptMount.MountPath = helperScriptDir // enforce /script so the hardcoded setup/teardown command paths resolve
+	dataMount := addVolumeMount(&helperPod.Spec.Containers[0].VolumeMounts, helperDataVolName, parentDir)
+	parentDir = dataMount.MountPath // the helper pod template may mount the data volume elsewhere
+	parentDir = strings.TrimSuffix(parentDir, string(filepath.Separator))
+	volumeDir = strings.TrimSuffix(volumeDir, string(filepath.Separator))
+	if parentDir == "" || volumeDir == "" || !filepath.IsAbs(parentDir) {
+		// it covers the `/` case
+		return fmt.Errorf("invalid path %v for %v: empty parent dir or volume dir, or relative parent dir", action, o.Path)
+	}
+	env := []v1.EnvVar{
+		{Name: envVolDir, Value: filepath.Join(parentDir, volumeDir)},
+		{Name: envVolMode, Value: string(o.Mode)},
+		{Name: envVolSize, Value: strconv.FormatInt(o.SizeInBytes, 10)},
+	}
+
 	// use different name for helper pods
 	// https://github.com/rancher/local-path-provisioner/issues/154
-	helperPod.Name = (helperPod.Name + "-" + string(action) + "-" + name)
+	helperPod.Name = (helperPod.Name + "-" + string(action) + "-" + o.Name)
 	if len(helperPod.Name) > HelperPodNameMaxLength {
 		helperPod.Name = helperPod.Name[:HelperPodNameMaxLength]
 	}
 	helperPod.Namespace = p.namespace
-	helperPod.Spec.NodeName = node
+	helperPod.Spec.NodeName = o.Node
 	helperPod.Spec.ServiceAccountName = p.serviceAccountName
 	helperPod.Spec.RestartPolicy = v1.RestartPolicyNever
 	helperPod.Spec.Tolerations = append(helperPod.Spec.Tolerations, lpvTolerations...)
 	helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, lpvVolumes...)
-	helperPod.Spec.Containers[0].VolumeMounts = append(helperPod.Spec.Containers[0].VolumeMounts, lpvVolumeMounts...)
-	helperPod.Spec.Containers[0].Command = cmdsForPath
+	helperPod.Spec.Containers[0].Command = cmd
+	helperPod.Spec.Containers[0].Env = append(helperPod.Spec.Containers[0].Env, env...)
 	helperPod.Spec.Containers[0].Args = []string{"-p", filepath.Join(parentDir, volumeDir),
-		"-s", strconv.FormatInt(sizeInBytes, 10),
-		"-m", volumeMode}
+		"-s", strconv.FormatInt(o.SizeInBytes, 10),
+		"-m", string(o.Mode)}
 
 	// If it already exists due to some previous errors, the pod will be cleaned up later automatically
 	// https://github.com/rancher/local-path-provisioner/issues/27
@@ -426,7 +450,7 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
 	}()
 
 	completed := false
-	for i := 0; i < CmdTimeoutCounts; i++ {
+	for i := 0; i < p.config.CmdTimeoutSeconds; i++ {
 		if pod, err := p.kubeClient.CoreV1().Pods(p.namespace).Get(helperPod.Name, metav1.GetOptions{}); err != nil {
 			return err
 		} else if pod.Status.Phase == v1.PodSucceeded {
@@ -436,13 +460,26 @@ func (p *LocalPathProvisioner) createHelperPod(action ActionType, cmdsForPath []
 		time.Sleep(1 * time.Second)
 	}
 	if !completed {
-		return fmt.Errorf("create process timeout after %v seconds", CmdTimeoutCounts)
+		return fmt.Errorf("create process timeout after %v seconds", p.config.CmdTimeoutSeconds)
 	}
 
-	logrus.Infof("Volume %v has been %vd on %v:%v", name, action, node, path)
+	logrus.Infof("Volume %v has been %vd on %v:%v", o.Name, action, o.Node, o.Path)
 	return nil
 }
 
+func addVolumeMount(mounts *[]v1.VolumeMount, name, mountPath string) *v1.VolumeMount {
+	for i, m := range *mounts {
+		if m.Name == name {
+			if m.MountPath == "" {
+				(*mounts)[i].MountPath = mountPath
+			}
+			return &(*mounts)[i]
+		}
+	}
+	*mounts = append(*mounts, v1.VolumeMount{Name: name, MountPath: mountPath})
+	return &(*mounts)[len(*mounts)-1]
+}
+
 func isJSONFile(configFile string) bool {
 	return strings.HasSuffix(configFile, ".json")
 }
@@ -506,5 +543,10 @@ func canonicalizeConfig(data *ConfigData) (cfg *Config, err error) {
 			npMap.Paths[path] = struct{}{}
 		}
 	}
+	if data.CmdTimeoutSeconds > 0 {
+		cfg.CmdTimeoutSeconds = data.CmdTimeoutSeconds
+	} else {
+		cfg.CmdTimeoutSeconds = defaultCmdTimeoutSeconds
+	}
 	return cfg, nil
 }
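
The new `cmdTimeoutSeconds` field is read from `config.json` via `ConfigData`; a sketch of a config that raises the helper-pod timeout (the value 180 is illustrative; when the field is omitted or non-positive, `canonicalizeConfig` falls back to `defaultCmdTimeoutSeconds`, i.e. 120):

```json
{
        "nodePathMap":[
        {
                "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
                "paths":["/opt/local-path-provisioner"]
        }
        ],
        "cmdTimeoutSeconds": 180
}
```
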
diff --git a/util.go b/util.go
index 5849dab1c27fa74abfa909b3f749ac7474bd9172..0a0b5415d1547a3a4060846dae8fbeb324a38d45 100644
--- a/util.go
+++ b/util.go
@@ -33,5 +33,8 @@ func loadHelperPodFile(helperPodYaml string) (*v1.Pod, error) {
 	if err != nil {
 		return nil, fmt.Errorf("invalid unmarshal the helper pod with helperPodJson: %v", string(helperPodJSON))
 	}
+	if len(p.Spec.Containers) == 0 {
+		return nil, fmt.Errorf("helper pod template does not specify any container")
+	}
 	return &p, nil
 }
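
With this check, a `helperPod.yaml` template must declare at least one container, since `createHelperPod` injects the command, environment variables, and volume mounts into `Spec.Containers[0]`. The minimal template used elsewhere in this change passes validation:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: helper-pod
spec:
  containers:   # loadHelperPodFile now rejects templates without containers
  - name: helper-pod
    image: busybox
    imagePullPolicy: IfNotPresent
```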