拉取基础镜像（pull）
# Pull the CUDA 11.2 + cuDNN8 devel base image, retag it for the
# Aliyun registry mirror, then push it there.
docker pull nvidia/cuda:11.2.0-cudnn8-devel-ubi7
docker tag nvidia/cuda:11.2.0-cudnn8-devel-ubi7 registry.cn-hangzhou.aliyuncs.com/mkmk/all:nvidia-cuda-11.2.0-cudnn8-devel-ubi7
docker push registry.cn-hangzhou.aliyuncs.com/mkmk/all:nvidia-cuda-11.2.0-cudnn8-devel-ubi7
GPU 版 TensorFlow 镜像
# Quick smoke test: confirm `--gpus all` passthrough works with the
# latest TensorFlow GPU image (interactive shell).
docker run --gpus all -it tensorflow/tensorflow:latest-gpu bash

# Mirror the pinned 2.4.1 GPU images (plain + Jupyter variant) to the
# Aliyun registry: pull, retag, push.
docker pull tensorflow/tensorflow:2.4.1-gpu
docker pull tensorflow/tensorflow:2.4.1-gpu-jupyter
docker tag tensorflow/tensorflow:2.4.1-gpu registry.cn-hangzhou.aliyuncs.com/mkmk/all:tf-2.4.1-gpu
docker tag tensorflow/tensorflow:2.4.1-gpu-jupyter registry.cn-hangzhou.aliyuncs.com/mkmk/all:tf-2.4.1-gpu-jupyter
docker push registry.cn-hangzhou.aliyuncs.com/mkmk/all:tf-2.4.1-gpu
docker push registry.cn-hangzhou.aliyuncs.com/mkmk/all:tf-2.4.1-gpu-jupyter
使用镜像（运行容器）
# Private registry that hosts the mirrored images.
register_url='192.168.170.100:5000'

# Remove any previous instance of the container before re-creating it.
docker stop gpu-jupyter1 && docker rm gpu-jupyter1

# Interactive run; change the IP/port above to your real registry address.
docker run -it --gpus=all --name gpu-jupyter1 -p 8888:8888 \
  "${register_url}/tensorflow/tensorflow:2.4.1-gpu-jupyter"

# Full command, detached — for debugging use. Starts the Jupyter notebook
# server bound to all interfaces so it is reachable from the host.
# NOTE(review): reuses the same container name as the interactive run
# above — only one of the two `docker run` commands can be active at once.
docker run -d --gpus=all --name gpu-jupyter1 -p 8888:8888 \
  "${register_url}/tensorflow/tensorflow:2.4.1-gpu-jupyter" \
  bash -c "source /etc/bash.bashrc && jupyter notebook --notebook-dir=/tf --ip 0.0.0.0 --no-browser --allow-root"
测试是否使用了 GPU
# Verify that TensorFlow inside the container can see and use the GPU,
# then compare an elementwise-add benchmark on CPU vs GPU.
# (Originally Jupyter notebook cells; `%%time` is a cell magic and the
# observed outputs are preserved as comments.)
import tensorflow as tf

# Full-form GPU availability check.
# NOTE(review): tf.test.is_gpu_available is deprecated in TF >= 2.1;
# prefer tf.config.list_physical_devices('GPU') on newer versions.
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)

# Simplified check.
print("is_gpu: ", tf.test.is_gpu_available())

# List every device TensorFlow can use.
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())

# --- Benchmark: 10000 adds of a 1000x1000 zero tensor ---

# %%time  (cell magic)
with tf.device("/device:CPU:0"):
    a = tf.zeros([1000, 1000])
    print("a on gpu:", a.device.endswith('GPU:0'))
    for i in range(10000):
        b = tf.add(a, a)
# Observed output:
#   a on gpu: False
#   CPU times: user 7.74 s, sys: 1.2 s, total: 8.94 s
#   Wall time: 3.39 s

# %%time  (cell magic)
with tf.device("/device:GPU:0"):
    a = tf.zeros([1000, 1000])
    print("a on gpu:", a.device.endswith('GPU:0'))
    for i in range(10000):
        b = tf.add(a, a)
# Observed output:
#   a on gpu: True
#   CPU times: user 900 ms, sys: 1.22 s, total: 2.12 s
#   Wall time: 2.12 s
来来来聊一晚