diff --git a/frameworks/onnxruntime/1.24.1-cpu/Dockerfile b/frameworks/onnxruntime/1.24.1-cpu/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..26ba70c225e19d82722967fdab901b2921325dc9
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-cpu/Dockerfile
@@ -0,0 +1,28 @@
+FROM opencloudos/opencloudos9-minimal:latest
+
+LABEL maintainer="Anstarc "
+LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container"
+LABEL org.opencontainers.image.description="ONNX Runtime 1.24.1 CPU on OpenCloudOS 9"
+
+# Install system dependencies
+RUN dnf install -y \
+    python3.11 \
+    python3.11-pip \
+    git \
+    wget \
+    && dnf clean all \
+    && rm -rf /var/cache/yum/*
+
+# Set the working directory
+WORKDIR /app
+
+# Install the ONNX Runtime CPU build and its dependencies
+# Note: onnx 1.20 is required for compatibility with onnxruntime 1.24.1 (opset 25)
+RUN pip3.11 install --no-cache-dir \
+    onnxruntime==1.24.1 \
+    onnx==1.20.1 \
+    numpy \
+    protobuf
+
+# Default command
+CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"]
diff --git a/frameworks/onnxruntime/1.24.1-cpu/README.md b/frameworks/onnxruntime/1.24.1-cpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d877a1dc5c204dcf16dfad5eb433c47f5bf9e9c6
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-cpu/README.md
@@ -0,0 +1,82 @@
+# ONNX Runtime 1.24.1 CPU on OpenCloudOS 9
+
+ONNX Runtime is Microsoft's open-source, cross-platform machine-learning inference accelerator; it runs models trained in a range of deep-learning frameworks (PyTorch, TensorFlow, and others).
+
+## Overview
+
+- **Base image**: opencloudos/opencloudos9-minimal:latest
+- **Python version**: 3.11
+- **ONNX Runtime version**: 1.24.1 (CPU)
+- **ONNX version**: 1.20.1 (supports opset 25)
+
+## Build
+
+```bash
+docker build -t oc9-onnxruntime-cpu:1.24.1 .
+```
+
+## Testing
+
+```bash
+./test.sh oc9-onnxruntime-cpu:1.24.1
+```
+
+The test suite covers:
+- Python environment
+- ONNX Runtime version verification
+- CPU inference
+- ONNX ML domain support (ai.onnx.ml)
+- ONNX IR version compatibility (v3-v10)
+
+## Usage examples
+
+### Basic usage
+
+```bash
+# Print the version
+docker run --rm oc9-onnxruntime-cpu:1.24.1 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)"
+
+# List the available Execution Providers
+docker run --rm oc9-onnxruntime-cpu:1.24.1 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())"
+```
+
+### CPU inference
+
+```python
+import onnxruntime as ort
+import numpy as np
+
+# Load the model with the CPU provider
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CPUExecutionProvider']
+)
+
+# Prepare the input data
+input_name = session.get_inputs()[0].name
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+
+# Run inference
+outputs = session.run(None, {input_name: input_data})
+```
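+
+When the container runs under a CPU quota, thread usage can be tuned through
+`SessionOptions`. The snippet below is a minimal sketch; the thread counts are
+illustrative, not a recommendation:
+
+```python
+import onnxruntime as ort
+
+opts = ort.SessionOptions()
+opts.intra_op_num_threads = 4  # threads used inside a single operator
+opts.inter_op_num_threads = 1  # threads used to run independent operators
+opts.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+session = ort.InferenceSession(
+    "model.onnx",
+    sess_options=opts,
+    providers=['CPUExecutionProvider']
+)
+```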
+
+### Interactive use
+
+```bash
+docker run --rm -it oc9-onnxruntime-cpu:1.24.1 bash
+```
+
+## Supported opset and IR versions
+
+- **ai.onnx**: opset 1-25 (standard inference operators)
+- **ai.onnx.ml**: opset 1-5 (classical machine-learning operators)
+- **ONNX IR**: v3-v10 (2017-2024, fully backward compatible)
+
+## System requirements
+
+- **Docker**: 19.03+
+
+## References
+
+- [ONNX Runtime documentation](https://onnxruntime.ai/docs/)
+- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html)
diff --git a/frameworks/onnxruntime/1.24.1-cpu/build.conf b/frameworks/onnxruntime/1.24.1-cpu/build.conf
new file mode 100644
index 0000000000000000000000000000000000000000..b6b502815e345237c8c2c0cdddd2d9ff55ef352c
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-cpu/build.conf
@@ -0,0 +1,4 @@
+# ONNX Runtime 1.24.1 CPU on OpenCloudOS 9
+IMAGE_NAME=oc9-onnxruntime-cpu
+IMAGE_TAG=1.24.1
+GPU_TEST=false
diff --git a/frameworks/onnxruntime/1.24.1-cpu/test.sh b/frameworks/onnxruntime/1.24.1-cpu/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..16a261c3a67ad50d4e55887d70e0ca6ba71411d5
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-cpu/test.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+# No 'set -e': each check tests $? explicitly; 'set -e' would exit before the
+# diagnostic re-runs in the else branches could report anything.
+
+IMAGE="${1:-oc9-onnxruntime-cpu:1.24.1}"
+
+echo "=========================================="
+echo "Testing the ONNX Runtime 1.24.1 CPU image"
+echo "Image: $IMAGE"
+echo "=========================================="
+
+# Test 1: Python environment
+echo -n "Test 1: Python environment... "
+docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 --version
+    exit 1
+fi
+
+# Test 2: ONNX Runtime version
+echo -n "Test 2: ONNX Runtime version... "
+VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)")
+if [ "$VERSION" = "1.24.1" ]; then
+    echo "✓ (version: $VERSION)"
+else
+    echo "✗ (expected: 1.24.1, got: $VERSION)"
+    exit 1
+fi
+
+# Test 3: core dependencies
+echo -n "Test 3: core dependencies (numpy, protobuf)... "
+docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf"
+    exit 1
+fi
+
+# Test 4: available Execution Providers
+echo -n "Test 4: available Execution Providers... "
+PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))")
+echo "✓"
+echo "  Available providers: $PROVIDERS"
+
+# Test 5: CPU inference
+echo -n "Test 5: CPU inference... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Build a minimal ONNX model
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+# Run inference with the CPU provider
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+"
+    exit 1
+fi
+
+# Test 6: ONNX ML domain support
+echo -n "Test 6: ONNX ML domain (ai.onnx.ml)... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise the LabelEncoder operator
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+assert list(output[0]) == [0, 1], 'ML domain test failed'
+print('ML domain OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+print('Output:', output[0])
+"
+    exit 1
+fi
+
+# Test 7: ONNX IR version compatibility
+echo -n "Test 7: ONNX IR version compatibility... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise a range of IR versions
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+
+print('IR versions OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓ (tested IR v3, v5, v7, v9)"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+    print(f'IR v{ir_ver} OK')
+"
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "All tests passed!"
+echo "=========================================="
diff --git a/frameworks/onnxruntime/1.24.1-gpu/Dockerfile b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..34f02faaef9ea618f053e0721b0856b3ef331f3f
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-gpu/Dockerfile
@@ -0,0 +1,35 @@
+FROM opencloudos/opencloudos9-cuda-devel:12.8
+
+LABEL maintainer="Anstarc "
+LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container"
+LABEL org.opencontainers.image.description="ONNX Runtime 1.24.1 GPU on OpenCloudOS 9"
+
+# Install system dependencies
+RUN dnf install -y \
+    python3.11 \
+    python3.11-pip \
+    git \
+    wget \
+    && dnf clean all \
+    && rm -rf /var/cache/yum/*
+
+# Set the working directory
+WORKDIR /app
+
+# Install the ONNX Runtime GPU build and its dependencies
+# Note: onnx 1.20 is required for compatibility with onnxruntime 1.24.1 (opset 25)
+RUN pip3.11 install --no-cache-dir \
+    onnxruntime-gpu==1.24.1 \
+    onnx==1.20.1 \
+    numpy \
+    protobuf
+
+# Install PyTorch (CUDA 12.8 wheels)
+RUN pip3.11 install --no-cache-dir \
+    torch torchvision --index-url https://download.pytorch.org/whl/cu128
+
+# GPU environment variables
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV CUDA_MODULE_LOADING=LAZY
+
+# Default command
+CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"]
diff --git a/frameworks/onnxruntime/1.24.1-gpu/README.md b/frameworks/onnxruntime/1.24.1-gpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..40b3aad01ab252717dda2a51b109f3de307108b7
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.1-gpu/README.md
@@ -0,0 +1,85 @@
+# ONNX Runtime 1.24.1 GPU on OpenCloudOS 9
+
+ONNX Runtime is Microsoft's open-source, cross-platform machine-learning inference accelerator; it runs models trained in a range of deep-learning frameworks (PyTorch, TensorFlow, and others).
+
+## Overview
+
+- **Base image**: opencloudos/opencloudos9-cuda-devel:12.8
+- **Python version**: 3.11
+- **CUDA version**: 12.8
+- **ONNX Runtime version**: 1.24.1 (GPU)
+- **ONNX version**: 1.20.1 (supports opset 25)
+
+## Build
+
+```bash
+docker build -t oc9-onnxruntime-gpu:1.24.1 .
+```
+
+## Testing
+
+```bash
+./test.sh oc9-onnxruntime-gpu:1.24.1
+```
+
+The test suite covers:
+- Python and CUDA environment
+- ONNX Runtime version verification
+- GPU inference
+- ONNX ML domain support (ai.onnx.ml)
+- ONNX IR version compatibility (v3-v10)
+
+## Usage examples
+
+### Basic usage
+
+```bash
+# Print the version
+docker run --rm oc9-onnxruntime-gpu:1.24.1 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)"
+
+# List the available Execution Providers
+docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.1 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())"
+```
+
+### GPU inference
+
+```python
+import onnxruntime as ort
+import numpy as np
+
+# Load the model with the CUDA provider (CPU as fallback)
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+# Prepare the input data
+input_name = session.get_inputs()[0].name
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+
+# Run inference
+outputs = session.run(None, {input_name: input_data})
+```
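+
+The providers list is a priority order: if the CUDA provider fails to
+initialize (for example, when the container was started without `--gpus all`),
+the session silently falls back to the CPU provider. A minimal sanity check:
+
+```python
+import onnxruntime as ort
+
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+# get_providers() returns the providers the session actually initialized
+if 'CUDAExecutionProvider' not in session.get_providers():
+    raise RuntimeError('CUDA provider inactive; inference would run on CPU')
+```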
" +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... " + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.2-cpu/Dockerfile b/frameworks/onnxruntime/1.24.2-cpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..9dbf76bfc287ce6258adeb74cbbda250e35168ec --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.2 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.2 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.2 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.2-cpu/README.md b/frameworks/onnxruntime/1.24.2-cpu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4c486c0ea5f3195b25230d8739d791a6959d207a --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.2 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.2 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.2 . +``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.2 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.2 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.2 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.2 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.2-cpu/build.conf b/frameworks/onnxruntime/1.24.2-cpu/build.conf new file mode 100644 index 0000000000000000000000000000000000000000..c03bb1c9a27f23a7a5f7ca430c9d61b62734abf0 --- /dev/null +++ b/frameworks/onnxruntime/1.24.2-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.2 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.2 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.2-cpu/test.sh b/frameworks/onnxruntime/1.24.2-cpu/test.sh new file mode 100755 index 
+
+### Interactive use
+
+```bash
+docker run --rm -it oc9-onnxruntime-cpu:1.24.2 bash
+```
+
+## Supported opset and IR versions
+
+- **ai.onnx**: opset 1-25 (standard inference operators)
+- **ai.onnx.ml**: opset 1-5 (classical machine-learning operators)
+- **ONNX IR**: v3-v10 (2017-2024, fully backward compatible)
+
+## System requirements
+
+- **Docker**: 19.03+
+
+## References
+
+- [ONNX Runtime documentation](https://onnxruntime.ai/docs/)
+- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html)
diff --git a/frameworks/onnxruntime/1.24.2-cpu/build.conf b/frameworks/onnxruntime/1.24.2-cpu/build.conf
new file mode 100644
index 0000000000000000000000000000000000000000..c03bb1c9a27f23a7a5f7ca430c9d61b62734abf0
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.2-cpu/build.conf
@@ -0,0 +1,4 @@
+# ONNX Runtime 1.24.2 CPU on OpenCloudOS 9
+IMAGE_NAME=oc9-onnxruntime-cpu
+IMAGE_TAG=1.24.2
+GPU_TEST=false
diff --git a/frameworks/onnxruntime/1.24.2-cpu/test.sh b/frameworks/onnxruntime/1.24.2-cpu/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..747920b7056e17a03fcadaaa246f2f6cf43436e6
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.2-cpu/test.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+# No 'set -e': each check tests $? explicitly; 'set -e' would exit before the
+# diagnostic re-runs in the else branches could report anything.
+
+IMAGE="${1:-oc9-onnxruntime-cpu:1.24.2}"
+
+echo "=========================================="
+echo "Testing the ONNX Runtime 1.24.2 CPU image"
+echo "Image: $IMAGE"
+echo "=========================================="
+
+# Test 1: Python environment
+echo -n "Test 1: Python environment... "
+docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 --version
+    exit 1
+fi
+
+# Test 2: ONNX Runtime version
+echo -n "Test 2: ONNX Runtime version... "
+VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)")
+if [ "$VERSION" = "1.24.2" ]; then
+    echo "✓ (version: $VERSION)"
+else
+    echo "✗ (expected: 1.24.2, got: $VERSION)"
+    exit 1
+fi
+
+# Test 3: core dependencies
+echo -n "Test 3: core dependencies (numpy, protobuf)... "
+docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf"
+    exit 1
+fi
+
+# Test 4: available Execution Providers
+echo -n "Test 4: available Execution Providers... "
+PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))")
+echo "✓"
+echo "  Available providers: $PROVIDERS"
+
+# Test 5: CPU inference
+echo -n "Test 5: CPU inference... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Build a minimal ONNX model
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+# Run inference with the CPU provider
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+"
+    exit 1
+fi
+
+# Test 6: ONNX ML domain support
+echo -n "Test 6: ONNX ML domain (ai.onnx.ml)... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise the LabelEncoder operator
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+assert list(output[0]) == [0, 1], 'ML domain test failed'
+print('ML domain OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+print('Output:', output[0])
+"
+    exit 1
+fi
+
+# Test 7: ONNX IR version compatibility
+echo -n "Test 7: ONNX IR version compatibility... "
+docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise a range of IR versions
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+
+print('IR versions OK')
+" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+    echo "✓ (tested IR v3, v5, v7, v9)"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+    print(f'IR v{ir_ver} OK')
+"
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "All tests passed!"
+echo "=========================================="
diff --git a/frameworks/onnxruntime/1.24.2-gpu/Dockerfile b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a3e220bffbba237540258963781137c5175ada1b
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.2-gpu/Dockerfile
@@ -0,0 +1,35 @@
+FROM opencloudos/opencloudos9-cuda-devel:12.8
+
+LABEL maintainer="Anstarc "
+LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container"
+LABEL org.opencontainers.image.description="ONNX Runtime 1.24.2 GPU on OpenCloudOS 9"
+
+# Install system dependencies
+RUN dnf install -y \
+    python3.11 \
+    python3.11-pip \
+    git \
+    wget \
+    && dnf clean all \
+    && rm -rf /var/cache/yum/*
+
+# Set the working directory
+WORKDIR /app
+
+# Install the ONNX Runtime GPU build and its dependencies
+# Note: onnx 1.20 is required for compatibility with onnxruntime 1.24.2 (opset 25)
+RUN pip3.11 install --no-cache-dir \
+    onnxruntime-gpu==1.24.2 \
+    onnx==1.20.1 \
+    numpy \
+    protobuf
+
+# Install PyTorch (CUDA 12.8 wheels)
+RUN pip3.11 install --no-cache-dir \
+    torch torchvision --index-url https://download.pytorch.org/whl/cu128
+
+# GPU environment variables
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV CUDA_MODULE_LOADING=LAZY
+
+# Default command
+CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"]
diff --git a/frameworks/onnxruntime/1.24.2-gpu/README.md b/frameworks/onnxruntime/1.24.2-gpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1abbddd51a997d628f0166ce01559ad4a68e4279
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.2-gpu/README.md
@@ -0,0 +1,85 @@
+# ONNX Runtime 1.24.2 GPU on OpenCloudOS 9
+
+ONNX Runtime is Microsoft's open-source, cross-platform machine-learning inference accelerator; it runs models trained in a range of deep-learning frameworks (PyTorch, TensorFlow, and others).
+
+## Overview
+
+- **Base image**: opencloudos/opencloudos9-cuda-devel:12.8
+- **Python version**: 3.11
+- **CUDA version**: 12.8
+- **ONNX Runtime version**: 1.24.2 (GPU)
+- **ONNX version**: 1.20.1 (supports opset 25)
+
+## Build
+
+```bash
+docker build -t oc9-onnxruntime-gpu:1.24.2 .
+```
+
+## Testing
+
+```bash
+./test.sh oc9-onnxruntime-gpu:1.24.2
+```
+
+The test suite covers:
+- Python and CUDA environment
+- ONNX Runtime version verification
+- GPU inference
+- ONNX ML domain support (ai.onnx.ml)
+- ONNX IR version compatibility (v3-v10)
+
+## Usage examples
+
+### Basic usage
+
+```bash
+# Print the version
+docker run --rm oc9-onnxruntime-gpu:1.24.2 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)"
+
+# List the available Execution Providers
+docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.2 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())"
+```
+
+### GPU inference
+
+```python
+import onnxruntime as ort
+import numpy as np
+
+# Load the model with the CUDA provider (CPU as fallback)
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+# Prepare the input data
+input_name = session.get_inputs()[0].name
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+
+# Run inference
+outputs = session.run(None, {input_name: input_data})
+```
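+
+`session.run()` copies inputs and outputs between host and device on every
+call. For repeated GPU inference, I/O binding avoids part of that traffic; the
+sketch below keeps the output on the device until it is explicitly copied back:
+
+```python
+import onnxruntime as ort
+import numpy as np
+
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+binding = session.io_binding()
+input_name = session.get_inputs()[0].name
+output_name = session.get_outputs()[0].name
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+
+binding.bind_cpu_input(input_name, input_data)  # uploaded once per run
+binding.bind_output(output_name, 'cuda')        # allocated on the GPU
+session.run_with_iobinding(binding)
+outputs = binding.copy_outputs_to_cpu()
+```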
" +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... " + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 9: ONNX IR 版本兼容性 +echo -n "测试 9: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" 
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.3-cpu/Dockerfile b/frameworks/onnxruntime/1.24.3-cpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4a0c7db76e95820a4d775eb4cc0fc0489144686d --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.3 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.3 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.3 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.3-cpu/README.md b/frameworks/onnxruntime/1.24.3-cpu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e3846a2f3b19074ceadb4512351de9a520636f9e --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.3 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.3 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.3 . +``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.3 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.3 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.3 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.3 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.3-cpu/build.conf b/frameworks/onnxruntime/1.24.3-cpu/build.conf new file mode 100644 index 0000000000000000000000000000000000000000..511886bd00296c63a3986c3e26873704b6fec133 --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.3 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.3 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.3-cpu/test.sh b/frameworks/onnxruntime/1.24.3-cpu/test.sh new file mode 100755 index 
0000000000000000000000000000000000000000..f3a072b871576a57022ae99c63a49475594f0b5a --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-cpu/test.sh @@ -0,0 +1,217 @@ +#!/bin/bash +set -e + +IMAGE="${1:-oc9-onnxruntime-cpu:1.24.3}" + +echo "==========================================" +echo "测试 ONNX Runtime 1.24.3 CPU 镜像" +echo "镜像: $IMAGE" +echo "==========================================" + +# 测试 1: Python 环境 +echo -n "测试 1: Python 环境... " +docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 --version + exit 1 +fi + +# 测试 2: ONNX Runtime 版本 +echo -n "测试 2: ONNX Runtime 版本... " +VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)") +if [ "$VERSION" = "1.24.3" ]; then + echo "✓ (版本: $VERSION)" +else + echo "✗ (期望: 1.24.3, 实际: $VERSION)" + exit 1 +fi + +# 测试 3: 核心依赖 +echo -n "测试 3: 核心依赖 (numpy, protobuf)... " +docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" + exit 1 +fi + +# 测试 4: 可用的 Execution Providers +echo -n "测试 4: 可用的 Execution Providers... " +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: CPU 推理测试 +echo -n "测试 5: CPU 推理测试... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 创建简单的 ONNX 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +# 使用 CPU Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 +fi + +# 测试 6: ONNX ML 域支持 +echo -n "测试 6: ONNX ML 域 (ai.onnx.ml)... 
" +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
-eq 0 ]; then + echo "✓ (测试 IR v3, v5, v7, v9)" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + print(f'IR v{ir_ver} OK') +" + exit 1 +fi + +echo "" +echo "==========================================" +echo "所有测试通过!" +echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.3-gpu/Dockerfile b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2da1827044eea029531626e9e611427a2450509b --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-gpu/Dockerfile @@ -0,0 +1,35 @@ +FROM opencloudos/opencloudos9-cuda-devel:12.8 + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.3 GPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime GPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.3 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime-gpu==1.24.3 \ + onnx==1.20.1 \ + numpy \ + protobuf + +RUN pip3.11 install --no-cache-dir \ + torch torchvision --index-url https://download.pytorch.org/whl/cu128 + +# 设置 GPU 环境变量 +ENV NVIDIA_VISIBLE_DEVICES=all +ENV CUDA_MODULE_LOADING=LAZY + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.3-gpu/README.md b/frameworks/onnxruntime/1.24.3-gpu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..13a058a418c295ac7ef8a9067545eb18395fa68d --- /dev/null +++ b/frameworks/onnxruntime/1.24.3-gpu/README.md @@ -0,0 +1,85 @@ +# ONNX Runtime 1.24.3 GPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-cuda-devel:12.8 +- **Python 版本**: 3.11 +- **CUDA 版本**: 12.8 +- **ONNX Runtime 版本**: 1.24.3 (GPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-gpu:1.24.3 . 
+
+### Interactive use
+
+```bash
+docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.3 bash
+```
+
+## Supported opset and IR versions
+
+- **ai.onnx**: opset 1-25 (standard inference operators, GPU-capable)
+- **ai.onnx.ml**: opset 1-5 (classical machine-learning operators, CPU only)
+- **ONNX IR**: v3-v10 (2017-2024, fully backward compatible)
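+
+To check whether a given model falls inside these ranges before deploying it, the relevant metadata can be read straight off the ModelProto. A sketch (the model path is a placeholder):
+
+```python
+import onnx
+
+model = onnx.load("model.onnx")  # placeholder path
+print("IR version:", model.ir_version)
+for opset in model.opset_import:
+    print("domain:", opset.domain or "ai.onnx", "opset:", opset.version)
+# Highest ai.onnx opset the installed onnx package understands
+print("Max supported opset:", onnx.defs.onnx_opset_version())
+```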
" +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... " + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
+
+# Test 8: ONNX ML domain support
+echo -n "Test 8: ONNX ML domain (ai.onnx.ml)... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise LabelEncoder
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+assert list(output[0]) == [0, 1], 'ML domain test failed'
+print('ML domain OK')
+" > /dev/null 2>&1; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+print('Output:', output[0])
+"
+    exit 1
+fi
+
+# Test 9: ONNX IR version compatibility
+echo -n "Test 9: ONNX IR version compatibility... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise several IR versions
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+
+print('IR versions OK')
+" > /dev/null 2>&1; then
+    echo "✓ (tested IR v3, v5, v7, v9)"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+    print(f'IR v{ir_ver} OK')
+"
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "All tests passed!"
+echo "=========================================="
+echo "==========================================" diff --git a/frameworks/onnxruntime/1.24.4-cpu/Dockerfile b/frameworks/onnxruntime/1.24.4-cpu/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..5b8b9851bc1654ae364af32c3c2fc2059cd015c8 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/Dockerfile @@ -0,0 +1,28 @@ +FROM opencloudos/opencloudos9-minimal:latest + +LABEL maintainer="Anstarc " +LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container" +LABEL org.opencontainers.image.description="ONNX Runtime 1.24.4 CPU on OpenCloudOS 9" + +# 安装系统依赖 +RUN dnf install -y \ + python3.11 \ + python3.11-pip \ + git \ + wget \ + && dnf clean all \ + && rm -rf /var/cache/yum/* + +# 设置工作目录 +WORKDIR /app + +# 安装 ONNX Runtime CPU 版本及依赖 +# 注意:onnx 需要 1.20 以兼容 onnxruntime 1.24.4 (opset 25) +RUN pip3.11 install --no-cache-dir \ + onnxruntime==1.24.4 \ + onnx==1.20.1 \ + numpy \ + protobuf + +# 默认命令 +CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"] diff --git a/frameworks/onnxruntime/1.24.4-cpu/README.md b/frameworks/onnxruntime/1.24.4-cpu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b1495f1fa537490b86a7841b6799840d0ea55d89 --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/README.md @@ -0,0 +1,82 @@ +# ONNX Runtime 1.24.4 CPU on OpenCloudOS 9 + +ONNX Runtime 是微软开源的跨平台机器学习推理加速器,支持多种深度学习框架(PyTorch、TensorFlow 等)训练的模型。 + +## 基本信息 + +- **基础镜像**: opencloudos/opencloudos9-minimal:latest +- **Python 版本**: 3.11 +- **ONNX Runtime 版本**: 1.24.4 (CPU) +- **ONNX 版本**: 1.20.1 (兼容 opset 25) + +## 构建 + +```bash +docker build -t oc9-onnxruntime-cpu:1.24.4 . +``` + +## 测试 + +```bash +./test.sh oc9-onnxruntime-cpu:1.24.4 +``` + +测试项包括: +- Python 环境 +- ONNX Runtime 版本验证 +- CPU 推理功能 +- ONNX ML 域支持 (ai.onnx.ml) +- ONNX IR 版本兼容性 (v3-v10) + +## 使用示例 + +### 基本使用 + +```bash +# 查看版本信息 +docker run --rm oc9-onnxruntime-cpu:1.24.4 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)" + +# 查看可用的 Execution Providers +docker run --rm oc9-onnxruntime-cpu:1.24.4 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())" +``` + +### CPU 推理 + +```python +import onnxruntime as ort +import numpy as np + +# 加载模型(使用 CPU Provider) +session = ort.InferenceSession( + "model.onnx", + providers=['CPUExecutionProvider'] +) + +# 准备输入数据 +input_name = session.get_inputs()[0].name +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) + +# 运行推理 +outputs = session.run(None, {input_name: input_data}) +``` + +### 交互式使用 + +```bash +docker run --rm -it oc9-onnxruntime-cpu:1.24.4 bash +``` + +## 支持的 Opset 和 IR 版本 + +- **ai.onnx**: opset 1-25 (标准推理算子) +- **ai.onnx.ml**: opset 1-5 (传统机器学习算子) +- **ONNX IR**: v3-v10 (2017-2024,完全向后兼容) + +## 系统要求 + +- **Docker**: 19.03+ + +## 参考资源 + +- [ONNX Runtime 官方文档](https://onnxruntime.ai/docs/) +- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html) diff --git a/frameworks/onnxruntime/1.24.4-cpu/build.conf b/frameworks/onnxruntime/1.24.4-cpu/build.conf new file mode 100644 index 0000000000000000000000000000000000000000..f86e19148783ec8d03e9d9754281567e4768488a --- /dev/null +++ b/frameworks/onnxruntime/1.24.4-cpu/build.conf @@ -0,0 +1,4 @@ +# ONNX Runtime 1.24.4 CPU on OpenCloudOS 9 +IMAGE_NAME=oc9-onnxruntime-cpu +IMAGE_TAG=1.24.4 +GPU_TEST=false diff --git a/frameworks/onnxruntime/1.24.4-cpu/test.sh b/frameworks/onnxruntime/1.24.4-cpu/test.sh new file mode 100755 index 
+
+### Interactive use
+
+```bash
+docker run --rm -it oc9-onnxruntime-cpu:1.24.4 bash
+```
+
+## Supported opset and IR versions
+
+- **ai.onnx**: opset 1-25 (standard inference operators)
+- **ai.onnx.ml**: opset 1-5 (classical machine-learning operators)
+- **ONNX IR**: v3-v10 (2017-2024, fully backward compatible)
+
+## Requirements
+
+- **Docker**: 19.03+
+
+## References
+
+- [ONNX Runtime documentation](https://onnxruntime.ai/docs/)
+- [CPU Execution Provider](https://onnxruntime.ai/docs/execution-providers/CPU-ExecutionProvider.html)
diff --git a/frameworks/onnxruntime/1.24.4-cpu/build.conf b/frameworks/onnxruntime/1.24.4-cpu/build.conf
new file mode 100644
index 0000000000000000000000000000000000000000..f86e19148783ec8d03e9d9754281567e4768488a
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.4-cpu/build.conf
@@ -0,0 +1,4 @@
+# ONNX Runtime 1.24.4 CPU on OpenCloudOS 9
+IMAGE_NAME=oc9-onnxruntime-cpu
+IMAGE_TAG=1.24.4
+GPU_TEST=false
diff --git a/frameworks/onnxruntime/1.24.4-cpu/test.sh b/frameworks/onnxruntime/1.24.4-cpu/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..aa82543eb0aafbb32af6c5c9814167110258962f
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.4-cpu/test.sh
@@ -0,0 +1,217 @@
+#!/bin/bash
+set -e
+
+IMAGE="${1:-oc9-onnxruntime-cpu:1.24.4}"
+
+echo "=========================================="
+echo "Testing ONNX Runtime 1.24.4 CPU image"
+echo "Image: $IMAGE"
+echo "=========================================="
+
+# Test 1: Python environment
+echo -n "Test 1: Python environment... "
+if docker run --rm "$IMAGE" python3.11 --version > /dev/null 2>&1; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 --version
+    exit 1
+fi
+
+# Test 2: ONNX Runtime version
+echo -n "Test 2: ONNX Runtime version... "
+VERSION=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime; print(onnxruntime.__version__)")
+if [ "$VERSION" = "1.24.4" ]; then
+    echo "✓ (version: $VERSION)"
+else
+    echo "✗ (expected: 1.24.4, got: $VERSION)"
+    exit 1
+fi
+
+# Test 3: core dependencies
+echo -n "Test 3: core dependencies (numpy, protobuf)... "
+if docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf" > /dev/null 2>&1; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "import numpy; import google.protobuf"
+    exit 1
+fi
+
+# Test 4: available Execution Providers
+echo -n "Test 4: available Execution Providers... "
+PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))")
+echo "✓"
+echo "  Available providers: $PROVIDERS"
+
+# Test 5: CPU inference
+echo -n "Test 5: CPU inference... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Build a minimal ONNX model
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+# Run inference with the CPU provider
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+" > /dev/null 2>&1; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224])
+Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224])
+node = helper.make_node('Identity', ['X'], ['Y'])
+graph = helper.make_graph([node], 'test', [X], [Y])
+model = helper.make_model(graph)
+onnx.save(model, '/tmp/test.onnx')
+
+sess = ort.InferenceSession('/tmp/test.onnx', providers=['CPUExecutionProvider'])
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+output = sess.run(None, {'X': input_data})
+print('Inference OK')
+"
+    exit 1
+fi
+
+# Test 6: ONNX ML domain support
+echo -n "Test 6: ONNX ML domain (ai.onnx.ml)... "
" +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? -eq 0 ]; then + echo "✓" +else + echo "✗" + docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +print('Output:', output[0]) +" + exit 1 +fi + +# 测试 7: ONNX IR 版本兼容性 +echo -n "测试 7: ONNX IR 版本兼容性... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试多个 IR 版本 +test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)] +for ir_ver, opset_ver in test_cases: + X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3]) + Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3]) + node = helper.make_node('Relu', ['X'], ['Y']) + graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y]) + model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)]) + model.ir_version = ir_ver + onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx') + + sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider']) + input_data = np.random.randn(1, 3).astype(np.float32) + output = sess.run(None, {'X': input_data}) + +print('IR versions OK') +" > /dev/null 2>&1 +if [ $? 
+
+# Test 7: ONNX IR version compatibility
+echo -n "Test 7: ONNX IR version compatibility... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise several IR versions
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+
+print('IR versions OK')
+" > /dev/null 2>&1; then
+    echo "✓ (tested IR v3, v5, v7, v9)"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+    print(f'IR v{ir_ver} OK')
+"
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "All tests passed!"
+echo "=========================================="
diff --git a/frameworks/onnxruntime/1.24.4-gpu/Dockerfile b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8cfcece29a0336dd9252622a9d9788ab1e0c5349
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.4-gpu/Dockerfile
@@ -0,0 +1,35 @@
+FROM opencloudos/opencloudos9-cuda-devel:12.8
+
+LABEL maintainer="Anstarc "
+LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container"
+LABEL org.opencontainers.image.description="ONNX Runtime 1.24.4 GPU on OpenCloudOS 9"
+
+# Install system dependencies
+RUN dnf install -y \
+    python3.11 \
+    python3.11-pip \
+    git \
+    wget \
+    && dnf clean all \
+    && rm -rf /var/cache/yum/*
+
+# Set the working directory
+WORKDIR /app
+
+# Install the ONNX Runtime GPU build and its dependencies
+# Note: onnx 1.20 is required for compatibility with onnxruntime 1.24.4 (opset 25)
+RUN pip3.11 install --no-cache-dir \
+    onnxruntime-gpu==1.24.4 \
+    onnx==1.20.1 \
+    numpy \
+    protobuf
+
+RUN pip3.11 install --no-cache-dir \
+    torch torchvision --index-url https://download.pytorch.org/whl/cu128
+
+# GPU environment variables
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV CUDA_MODULE_LOADING=LAZY
+
+# Default command
+CMD ["python3.11", "-c", "import onnxruntime; print(f'ONNX Runtime {onnxruntime.__version__} ready')"]
diff --git a/frameworks/onnxruntime/1.24.4-gpu/README.md b/frameworks/onnxruntime/1.24.4-gpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa03bedc5667b117c4392735e31a623d235866c3
--- /dev/null
+++ b/frameworks/onnxruntime/1.24.4-gpu/README.md
@@ -0,0 +1,85 @@
+# ONNX Runtime 1.24.4 GPU on OpenCloudOS 9
+
+ONNX Runtime is Microsoft's open-source, cross-platform inference accelerator for machine-learning models; it runs models trained in PyTorch, TensorFlow, and other deep-learning frameworks.
+
+## Overview
+
+- **Base image**: opencloudos/opencloudos9-cuda-devel:12.8
+- **Python version**: 3.11
+- **CUDA version**: 12.8
+- **ONNX Runtime version**: 1.24.4 (GPU)
+- **ONNX version**: 1.20.1 (supports opset 25)
+
+## Build
+
+```bash
+docker build -t oc9-onnxruntime-gpu:1.24.4 .
+```
+
+## Test
+
+```bash
+./test.sh oc9-onnxruntime-gpu:1.24.4
+```
+
+The test suite covers:
+- Python and CUDA environment
+- ONNX Runtime version check
+- GPU inference
+- ONNX ML domain support (ai.onnx.ml)
+- ONNX IR version compatibility (v3-v10)
+
+## Usage examples
+
+### Basics
+
+```bash
+# Print the version
+docker run --rm oc9-onnxruntime-gpu:1.24.4 python3.11 -c "import onnxruntime; print(onnxruntime.__version__)"
+
+# List the available Execution Providers
+docker run --rm --gpus all oc9-onnxruntime-gpu:1.24.4 python3.11 -c "import onnxruntime as ort; print(ort.get_available_providers())"
+```
+
+### GPU inference
+
+```python
+import onnxruntime as ort
+import numpy as np
+
+# Load a model with the CUDA provider (CPU as fallback)
+session = ort.InferenceSession(
+    "model.onnx",
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+# Prepare input data
+input_name = session.get_inputs()[0].name
+input_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
+
+# Run inference
+outputs = session.run(None, {input_name: input_data})
+```
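+
+When inputs are fed repeatedly, per-call host-to-device copies can dominate. A rough sketch of IOBinding to control where tensors live (the model path and the input/output names 'X'/'Y' are placeholders for whatever the model declares):
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession(
+    "model.onnx",  # placeholder path
+    providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+)
+
+binding = session.io_binding()
+x = np.random.randn(1, 3, 224, 224).astype(np.float32)
+binding.bind_cpu_input('X', x)   # copied to the device once per bind
+binding.bind_output('Y')         # stays on the device until fetched
+session.run_with_iobinding(binding)
+outputs = binding.copy_outputs_to_cpu()
+```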
+
+### Interactive use
+
+```bash
+docker run --rm -it --gpus all oc9-onnxruntime-gpu:1.24.4 bash
+```
+
+## Supported opset and IR versions
+
+- **ai.onnx**: opset 1-25 (standard inference operators, GPU-capable)
+- **ai.onnx.ml**: opset 1-5 (classical machine-learning operators, CPU only)
+- **ONNX IR**: v3-v10 (2017-2024, fully backward compatible)
+
+## Requirements
+
+- **GPU**: NVIDIA GPU with CUDA 12.x support
+- **GPU memory**: 4 GB+ recommended
+- **Docker**: 19.03+ with the NVIDIA Container Toolkit
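+
+A quick sanity check that the installed wheel and the host line up, runnable inside the container:
+
+```python
+import onnxruntime as ort
+
+# get_device() reports what the installed wheel was built for ('GPU' for
+# onnxruntime-gpu); get_available_providers() reports what can actually
+# initialize on this machine.
+print(ort.get_device())
+print(ort.get_available_providers())
+```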
" +PROVIDERS=$(docker run --rm "$IMAGE" python3.11 -c "import onnxruntime as ort; print(','.join(ort.get_available_providers()))") +echo "✓" +echo " 可用 Providers: $PROVIDERS" + +# 测试 5: GPU 环境检查 +echo -n "测试 5: GPU 环境检查... " +if command -v nvidia-smi &> /dev/null; then + GPU_COUNT=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) + echo "✓ (检测到 $GPU_COUNT 个 GPU)" + + # 测试 6: CUDA Execution Provider + echo -n "测试 6: CUDA Execution Provider... " + HAS_CUDA=$(docker run --rm --gpus all "$IMAGE" python3.11 -c "import onnxruntime as ort; print('CUDAExecutionProvider' in ort.get_available_providers())") + if [ "$HAS_CUDA" = "True" ]; then + echo "✓" + else + echo "✗ (CUDA Provider 不可用)" + exit 1 + fi + + # 测试 7: 简单推理测试(使用 CUDA) + echo -n "测试 7: 简单推理测试 (CUDA)... " + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np + +# 创建简单的 ONNX 模型(identity 操作) +from onnxruntime import InferenceSession +import onnx +from onnx import helper, TensorProto + +# 创建一个简单的 identity 模型 +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) + +# 保存模型 +onnx.save(model, '/tmp/test.onnx') + +# 使用 CUDA Provider 进行推理 +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) + +print('推理成功') +" > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "✓" + else + echo "✗" + docker run --rm --gpus all "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3, 224, 224]) +Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3, 224, 224]) +node = helper.make_node('Identity', ['X'], ['Y']) +graph = helper.make_graph([node], 'test', [X], [Y]) +model = helper.make_model(graph) +onnx.save(model, '/tmp/test.onnx') + +sess = ort.InferenceSession('/tmp/test.onnx', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) +input_data = np.random.randn(1, 3, 224, 224).astype(np.float32) +output = sess.run(None, {'X': input_data}) +print('推理成功') +" + exit 1 + fi +else + echo "⊘ (未检测到 GPU,跳过 GPU 测试)" + echo "测试 6-7: 跳过 (需要 GPU 环境)" +fi + +# 测试 8: ONNX ML 域支持 +echo -n "测试 8: ONNX ML 域 (ai.onnx.ml)... " +docker run --rm "$IMAGE" python3.11 -c " +import onnxruntime as ort +import numpy as np +from onnx import helper, TensorProto +import onnx + +# 测试 LabelEncoder +X = helper.make_tensor_value_info('X', TensorProto.STRING, [None]) +Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None]) +node = helper.make_node( + 'LabelEncoder', ['X'], ['Y'], + domain='ai.onnx.ml', + keys_strings=['cat', 'dog'], + values_int64s=[0, 1], + default_int64=999 +) +graph = helper.make_graph([node], 'test_ml', [X], [Y]) +model = helper.make_model(graph, opset_imports=[ + helper.make_opsetid('', 21), + helper.make_opsetid('ai.onnx.ml', 3) +]) +onnx.save(model, '/tmp/test_ml.onnx') + +sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider']) +input_data = np.array(['cat', 'dog'], dtype=object) +output = sess.run(None, {'X': input_data}) +assert list(output[0]) == [0, 1], 'ML domain test failed' +print('ML domain OK') +" > /dev/null 2>&1 +if [ $? 
+
+# Test 8: ONNX ML domain support
+echo -n "Test 8: ONNX ML domain (ai.onnx.ml)... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise LabelEncoder
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+assert list(output[0]) == [0, 1], 'ML domain test failed'
+print('ML domain OK')
+" > /dev/null 2>&1; then
+    echo "✓"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+X = helper.make_tensor_value_info('X', TensorProto.STRING, [None])
+Y = helper.make_tensor_value_info('Y', TensorProto.INT64, [None])
+node = helper.make_node(
+    'LabelEncoder', ['X'], ['Y'],
+    domain='ai.onnx.ml',
+    keys_strings=['cat', 'dog'],
+    values_int64s=[0, 1],
+    default_int64=999
+)
+graph = helper.make_graph([node], 'test_ml', [X], [Y])
+model = helper.make_model(graph, opset_imports=[
+    helper.make_opsetid('', 21),
+    helper.make_opsetid('ai.onnx.ml', 3)
+])
+onnx.save(model, '/tmp/test_ml.onnx')
+
+sess = ort.InferenceSession('/tmp/test_ml.onnx', providers=['CPUExecutionProvider'])
+input_data = np.array(['cat', 'dog'], dtype=object)
+output = sess.run(None, {'X': input_data})
+print('Output:', output[0])
+"
+    exit 1
+fi
+
+# Test 9: ONNX IR version compatibility
+echo -n "Test 9: ONNX IR version compatibility... "
+if docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+# Exercise several IR versions
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+
+print('IR versions OK')
+" > /dev/null 2>&1; then
+    echo "✓ (tested IR v3, v5, v7, v9)"
+else
+    echo "✗"
+    docker run --rm "$IMAGE" python3.11 -c "
+import onnxruntime as ort
+import numpy as np
+from onnx import helper, TensorProto
+import onnx
+
+test_cases = [(3, 7), (5, 11), (7, 15), (9, 19)]
+for ir_ver, opset_ver in test_cases:
+    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
+    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
+    node = helper.make_node('Relu', ['X'], ['Y'])
+    graph = helper.make_graph([node], f'test_ir{ir_ver}', [X], [Y])
+    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', opset_ver)])
+    model.ir_version = ir_ver
+    onnx.save(model, f'/tmp/test_ir{ir_ver}.onnx')
+
+    sess = ort.InferenceSession(f'/tmp/test_ir{ir_ver}.onnx', providers=['CPUExecutionProvider'])
+    input_data = np.random.randn(1, 3).astype(np.float32)
+    output = sess.run(None, {'X': input_data})
+    print(f'IR v{ir_ver} OK')
+"
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "All tests passed!"
+echo "=========================================="