# Install the HAMi NVIDIA CustomResourceLimitation ConfigMaps and the
# device-plugin config ConfigMap into kube-public.
#
# The here-doc delimiter is quoted ('EOF') so the shell performs no
# parameter/command expansion inside the YAML payload — the manifests
# are applied exactly as written.
kubectl apply -f - <<'EOF'
---
# CRL: nvidia.com/gpualloc — number of physical GPUs requested per workload.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cf-crl-hami-nvidia-gpualloc
  namespace: kube-public
  labels:
    features.cpaas.io/enabled: "true"
    features.cpaas.io/group: hami-nvidia
    features.cpaas.io/type: CustomResourceLimitation
data:
  dataType: integer
  defaultValue: "1"
  descriptionEn: Number of GPU jobs for resource quota. When create workload, declare how many physical GPUs needs and the requests of gpu core and gpu memory are the usage of per physical GPU
  descriptionZh: 资源配额代表 GPU 任务数。创建负载时代表申请的物理 gpu 个数, 申请的算力和显存都是每个物理 GPU 的使用量
  group: hami-nvidia
  groupI18n: '{"zh": "HAMi NVIDIA", "en": "HAMi NVIDIA"}'
  key: nvidia.com/gpualloc
  labelEn: gpu number
  labelZh: gpu 个数
  limits: optional
  requests: disabled
  resourceUnit: "count"
  relatedResources: "nvidia.com/gpucores,nvidia.com/gpumem"
  excludeResources: "nvidia.com/mps-core,nvidia.com/mps-memory,tencent.com/vcuda-core,tencent.com/vcuda-memory"
  runtimeClassName: ""
---
# CRL: nvidia.com/gpucores — vGPU compute share; 100 = one full physical GPU.
# NOTE(review): unlike its siblings this entry has no resourceUnit — confirm
# whether the consumer expects one here.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cf-crl-hami-nvidia-gpucores
  namespace: kube-public
  labels:
    features.cpaas.io/enabled: "true"
    features.cpaas.io/group: hami-nvidia
    features.cpaas.io/type: CustomResourceLimitation
data:
  dataType: integer
  defaultValue: "20"
  descriptionEn: vgpu cores, 100 cores represents the all computing power of a physical GPU
  descriptionZh: vgpu 算力, 100 算力代表一个物理 GPU 的全部算力
  group: hami-nvidia
  groupI18n: '{"zh": "HAMi NVIDIA", "en": "HAMi NVIDIA"}'
  key: nvidia.com/gpucores
  prefix: limits
  labelEn: vgpu cores
  labelZh: vgpu 算力
  limits: optional
  requests: disabled
  relatedResources: "nvidia.com/gpualloc,nvidia.com/gpumem"
  excludeResources: "nvidia.com/mps-core,nvidia.com/mps-memory,tencent.com/vcuda-core,tencent.com/vcuda-memory"
  runtimeClassName: ""
  ignoreNodeCheck: "true"
---
# CRL: nvidia.com/gpumem — vGPU memory in Mi.
# NOTE(review): descriptionEn/descriptionZh are missing here while the other
# CRL entries have them — confirm whether descriptions should be added.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cf-crl-hami-nvidia-gpumem
  namespace: kube-public
  labels:
    features.cpaas.io/enabled: "true"
    features.cpaas.io/group: hami-nvidia
    features.cpaas.io/type: CustomResourceLimitation
data:
  dataType: integer
  defaultValue: "4000"
  group: hami-nvidia
  groupI18n: '{"zh": "HAMi NVIDIA", "en": "HAMi NVIDIA"}'
  key: nvidia.com/gpumem
  prefix: limits
  labelEn: vgpu memory
  labelZh: vgpu 显存
  limits: optional
  requests: disabled
  resourceUnit: "Mi"
  relatedResources: "nvidia.com/gpualloc,nvidia.com/gpucores"
  excludeResources: "nvidia.com/mps-core,nvidia.com/mps-memory,tencent.com/vcuda-core,tencent.com/vcuda-memory"
  runtimeClassName: ""
  ignoreNodeCheck: "true"
---
# Device-plugin configuration: identifies the HAMi device and the node label
# (gpu=on) used to select GPU nodes.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cf-crl-hami-config
  namespace: kube-public
  labels:
    device-plugin.cpaas.io/config: "true"
data:
  deviceName: "HAMi"
  nodeLabelKey: "gpu"
  nodeLabelValue: "on"
EOF