diff --git a/frameworks/peft/0.18.1/Dockerfile b/frameworks/peft/0.18.1/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..da58ae7de9a678e6220b8787e8cd4eccfac3701a
--- /dev/null
+++ b/frameworks/peft/0.18.1/Dockerfile
@@ -0,0 +1,30 @@
+FROM opencloudos/opencloudos9-cuda-devel:12.8
+
+LABEL maintainer="OpenCloudOS Community"
+LABEL org.opencontainers.image.source="https://gitee.com/OpenCloudOS/ai-agent-container"
+LABEL org.opencontainers.image.description="PEFT (GPU) on OpenCloudOS 9"
+
+# Install Python 3.11 and its pip, then make python3 point at it.
+# NOTE: dnf caches under /var/cache/dnf on this EL9-based distro (not /var/cache/yum).
+RUN dnf install -y \
+    python3.11 \
+    python3.11-pip \
+    && dnf clean all \
+    && rm -rf /var/cache/dnf/* \
+    && ln -sf /usr/bin/python3.11 /usr/bin/python3
+
+# Install PEFT and the common GPU training stack via uv.
+# uv's cache-disable flag is --no-cache (pip's --no-cache-dir is not accepted by uv).
+RUN pip3.11 install --no-cache-dir uv \
+    && uv pip install --no-cache \
+    peft==0.18.1 \
+    torch \
+    transformers \
+    accelerate \
+    safetensors \
+    --system
+
+# Expose all GPUs to the container runtime by default.
+ENV NVIDIA_VISIBLE_DEVICES=all
+
+RUN echo $(date +"%Y-%m-%dT%H:%M:%S%z") > /opencloudos_build_date.txt
+
+CMD ["python3.11"]
\ No newline at end of file
diff --git a/frameworks/peft/0.18.1/README.md b/frameworks/peft/0.18.1/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e27d6f7ed6b117832c31962e7c6e6d580eaa3ffa
--- /dev/null
+++ b/frameworks/peft/0.18.1/README.md
@@ -0,0 +1,60 @@
+# PEFT on OpenCloudOS 9
+
+## 基本信息
+- **框架版本**：v0.18.1
+- **基础镜像**：opencloudos/opencloudos9-cuda-devel:12.8
+- **Python 版本**：3.11
+- **CUDA 版本**：12.8
+
+## 简介
+
+PEFT（Parameter-Efficient Fine-Tuning）是 HuggingFace 开发的参数高效微调库，支持 LoRA、QLoRA、Prefix Tuning、P-Tuning 等多种微调方法，可在消费级 GPU 上微调大语言模型。
+
+## 构建
+
+```bash
+docker build -t oc9-peft:0.18.1 .
+```
+
+## 使用示例
+
+```bash
+# 验证 peft 版本
+docker run --rm --gpus all oc9-peft:0.18.1 python3 -c "import peft; print(peft.__version__)"
+
+# 验证 CUDA 可用性
+docker run --rm --gpus all oc9-peft:0.18.1 python3 -c "import torch; print(torch.cuda.is_available())"
+
+# LoRA 微调示例（需要联网下载模型）
+docker run --rm --gpus all oc9-peft:0.18.1 python3 -c "
+from peft import LoraConfig, get_peft_model, TaskType
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model = AutoModelForCausalLM.from_pretrained('gpt2')
+lora_config = LoraConfig(
+    task_type=TaskType.CAUSAL_LM,
+    r=8,
+    lora_alpha=32,
+    lora_dropout=0.1,
+    target_modules=['c_attn', 'c_proj']
+)
+model = get_peft_model(model, lora_config)
+model.print_trainable_parameters()
+"
+```
+
+## 测试验证
+
+```bash
+# 执行测试脚本（需要 GPU 环境）
+bash test.sh oc9-peft:0.18.1
+```
+
+测试脚本会验证以下内容：
+- CUDA 环境是否可用
+- peft 是否正常导入
+- 核心组件（LoraConfig、get_peft_model、TaskType）是否可加载
+
+## 已知问题
+- 首次运行时需要联网下载模型权重，建议提前挂载本地模型缓存目录以加速启动。
+- QLoRA 微调需要安装 `bitsandbytes`，如需使用请额外安装。
\ No newline at end of file
diff --git a/frameworks/peft/0.18.1/build.conf b/frameworks/peft/0.18.1/build.conf
new file mode 100644
index 0000000000000000000000000000000000000000..8ddb92079a2d6d0e084931281f1bea72c7d6c5b0
--- /dev/null
+++ b/frameworks/peft/0.18.1/build.conf
@@ -0,0 +1,4 @@
+# peft 0.18.1 on OpenCloudOS 9 (GPU)
+IMAGE_NAME=oc9-peft
+IMAGE_TAG=0.18.1
+GPU_TEST=true
\ No newline at end of file
diff --git a/frameworks/peft/0.18.1/test.sh b/frameworks/peft/0.18.1/test.sh
new file mode 100644
index 0000000000000000000000000000000000000000..315d8b8002e6500866b6a5a432152efba089402f
--- /dev/null
+++ b/frameworks/peft/0.18.1/test.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+set -e
+
+IMAGE="${1:?ERROR: 缺少镜像参数。用法: bash test.sh <image>}"
+
+echo "=== GPU 镜像功能测试 ==="
+
+# 1. Verify the CUDA environment is usable inside the image.
+echo -n "检查 CUDA... "
+docker run --rm --gpus all "$IMAGE" python3 -c "
+import torch
+assert torch.cuda.is_available(), 'CUDA not available'
+print(f'GPU: {torch.cuda.get_device_name(0)}')
+" && echo "✓ 通过" || { echo "✗ 失败"; exit 1; }
+
+# 2. Verify that peft imports and reports its version.
+echo -n "检查 peft import... "
+docker run --rm --gpus all "$IMAGE" python3 -c "
+import peft
+print(peft.__version__)
+" && echo "✓ 通过" || { echo "✗ 失败"; exit 1; }
+
+# 3. Verify the core PEFT components (LoraConfig/get_peft_model/TaskType) load.
+echo -n "检查 peft 核心组件... "
+docker run --rm --gpus all "$IMAGE" python3 -c "
+from peft import LoraConfig, get_peft_model, TaskType
+import torch
+from transformers import AutoModelForCausalLM
+
+config = LoraConfig(
+    task_type=TaskType.CAUSAL_LM,
+    r=8,
+    lora_alpha=32,
+    lora_dropout=0.1
+)
+print(f'LoraConfig 创建成功: r={config.r}, alpha={config.lora_alpha}')
+" && echo "✓ 通过" || { echo "✗ 失败"; exit 1; }
+
+echo "=== 所有测试通过 ==="
\ No newline at end of file