diff --git a/script/app-mlperf-inference-intel/_cm.yaml b/script/app-mlperf-inference-intel/_cm.yaml
index 7e839578a4..4f0cdbacec 100644
--- a/script/app-mlperf-inference-intel/_cm.yaml
+++ b/script/app-mlperf-inference-intel/_cm.yaml
@@ -330,6 +330,121 @@ variations:
       - pip-package
       - optimum
 
+  sdxl:
+    group: model
+    env:
+      CM_BENCHMARK: STANDALONE_SDXL
+      CM_MODEL: stable-diffusion-xl
+
+  sdxl,pytorch:
+    adr:
+      conda-package:
+        tags: _name.sdxl-pt
+    deps:
+    - tags: get,conda,_name.sdxl-pt
+    - tags: get,python,_conda.sdxl-pt
+      adr:
+        conda-python:
+          version: "3.9"
+    - names:
+      - conda-package
+      - mkl
+      tags: get,generic,conda-package,_package.mkl,_source.conda-forge
+    - names:
+      - conda-package
+      - mkl-include
+      tags: get,generic,conda-package,_package.mkl-include,_source.intel
+    - names:
+      - conda-package
+      - llvm-openmp
+      tags: get,generic,conda-package,_package.llvm-openmp,_source.conda-forge
+    - names:
+      - conda-package
+      - ncurses
+      tags: get,generic,conda-package,_package.ncurses,_source.conda-forge
+    - tags: get,generic-sys-util,_numactl
+    - tags: get,generic,conda-package,_package.jemalloc,_source.conda-forge
+      names:
+      - conda-package
+      - jemalloc
+    - tags: get,generic-python-lib,_package.torch,_path.https://download.pytorch.org/whl/nightly/cpu/torch-2.3.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl
+      names:
+      - pip-package
+      - pip-torch
+    - tags: get,generic-python-lib,_package.torchvision,_path.https://download.pytorch.org/whl/nightly/cpu/torchvision-0.18.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl
+      names:
+      - pip-package
+      - pip-torchvision
+    - tags: get,generic-python-lib,_torch
+      names:
+      - pip-package
+      - torch
+    - tags: install,diffusers,from.src,_for-intel-mlperf-inference-v4.0-sdxl
+      names:
+      - diffusers-from-src
+    - tags: install,ipex,from.src,_for-intel-mlperf-inference-v4.0-sdxl
+      names:
+      - ipex-from-src
+    - tags: get,generic,conda-package,_package.ninja
+      names:
+      - conda-package
+      - ninja
+    - tags: get,mlcommons,inference,src
+      names:
+      - inference-src
+    - tags: get,mlcommons,inference,loadgen,_custom-python,_keep-build
+      names:
+      - inference-loadgen
+
+  sdxl,build-harness:
+    deps:
+    - tags: get,generic-python-lib,_package.pybind11[global]
+      names:
+      - pip-package
+      - pybind11
+
+  sdxl,run-harness:
+    deps:
+    - tags: get,ml-model,sdxl,_fp32,_pytorch
+    - tags: get,dataset,coco2014,original,_validation
+    - tags: get,generic-python-lib,_package.opencv-python
+      names:
+      - pip-package
+      - opencv
+    - tags: get,generic-python-lib,_package.transformers
+      names:
+      - pip-package
+      - transformers
+    - tags: get,generic-python-lib,_package.accelerate
+      names:
+      - pip-package
+      - accelerate
+    - tags: get,generic-python-lib,_package.open-clip-torch
+      names:
+      - pip-package
+      - open-clip-torch
+    - tags: get,generic-python-lib,_package.pycocotools
+      names:
+      - pip-package
+      - pycocotools
+    - tags: get,generic-python-lib,_package.torchmetrics[image]
+      names:
+      - pip-package
+      - torchmetrics
+    - tags: get,generic-python-lib,_torchvision
+      version: "0.17.1"
+      names:
+      - pip-package
+      - torchvision
+    - tags: get,generic-python-lib,_package.py-libnuma
+      names:
+      - pip-package
+      - libnuma
+
+
+
+
+
   resnet50,pytorch:
     adr:
       conda-package:
diff --git a/script/app-mlperf-inference-intel/build_sdxl_harness.sh b/script/app-mlperf-inference-intel/build_sdxl_harness.sh
new file mode 100644
index 0000000000..a0817e4955
--- /dev/null
+++ b/script/app-mlperf-inference-intel/build_sdxl_harness.sh
@@ -0,0 +1,27 @@
+cd ${CM_HARNESS_CODE_ROOT}
+
+cd utils
+cmd=" python -m pip install ."
+
+echo "$cmd"
+eval "$cmd"
+test "$?" -eq 0 || exit "$?"
+
+cd ../tools
+wget https://raw.githubusercontent.com/mlcommons/inference/master/text_to_image/tools/coco.py
+test "$?" -eq 0 || exit "$?"
+cd ..
+
+mkdir -p coco2014/captions
+wget -P coco2014/captions/ https://raw.githubusercontent.com/mlcommons/inference/master/text_to_image/coco2014/captions/captions_source.tsv
+test "$?" -eq 0 || exit "$?"
+
+mkdir -p coco2014/latents
+wget -P coco2014/latents/ https://github.com/mlcommons/inference/raw/master/text_to_image/tools/latents.pt
+test "$?" -eq 0 || exit "$?"
+
+cd tools/
+bash download-coco-2014-calibration.sh --download-path ${PWD}/../coco2014/warmup_dataset --num-workers 1
+test "$?" -eq 0 || exit "$?"
+cd ..
+
diff --git a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
index 233d5c857f..82aa6906cb 100644
--- a/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
+++ b/script/app-mlperf-inference-intel/calibrate_dlrm_v2_model.sh
@@ -1,7 +1,19 @@
 #!/bin/bash
 
 export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH}
+export DATA_DIR=/mnt/dlrm_data
 echo ${CM_HARNESS_CODE_ROOT}
 cd ${CM_HARNESS_CODE_ROOT}
-numactl -m 1 python python/dump_torch_model.py --model-path=$MODEL_DIR --dataset-path=$DATA_DIR
-exit 1
+python -m pip install scikit-learn==1.3.0 torchsnapshot torchrec==0.3.2
+test $? -eq 0 || exit $?
+python -m pip install fbgemm-gpu==0.3.2 --index-url https://download.pytorch.org/whl/cpu
+test $? -eq 0 || exit $?
+python python/dump_torch_model.py --model-path=$MODEL_DIR --dataset-path=$DATA_DIR
+test $? -eq 0 || exit $?
+
+python python/calibration.py \
+    --max-batchsize=65536 \
+    --model-path=${MODEL_DIR}/../dlrm-multihot-pytorch.pt \
+    --dataset-path=/mnt/dlrm_data/ \
+    --use-int8 --calibration
+test $? -eq 0 || exit $?
diff --git a/script/app-mlperf-inference-intel/customize.py b/script/app-mlperf-inference-intel/customize.py
index 9a1c164745..cd3328eadc 100644
--- a/script/app-mlperf-inference-intel/customize.py
+++ b/script/app-mlperf-inference-intel/customize.py
@@ -31,6 +31,8 @@ def preprocess(i):
         if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == 'v4.0':
             if 'gptj' in ml_model:
                 code_base_folder = "ITREX"
+            if 'dlrm-v2' in ml_model:
+                code_base_folder = "pytorch-cpu-int8"
 
         harness_root = os.path.join(env['CM_MLPERF_INFERENCE_RESULTS_PATH'], 'closed', 'Intel', 'code', ml_model, code_base_folder)
 
@@ -91,6 +93,8 @@ def preprocess(i):
             i['run_script_input']['script_name'] = "build_bert_harness"
             env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "bert_inference")
             env['DATA_PATH'] = os.path.join(os.getcwd(), "harness", "bert")
+        elif "stable-diffusion" in env['CM_MODEL']:
+            i['run_script_input']['script_name'] = "build_sdxl_harness"
         elif "resnet50" in env['CM_MODEL']:
             i['run_script_input']['script_name'] = "build_resnet50_harness"
             env['CM_MLPERF_INFERENCE_INTEL_HARNESS_PATH'] = os.path.join(os.getcwd(), "harness", "build", "resnet50_inference")
@@ -162,6 +166,14 @@ def preprocess(i):
             env['CM_RUN_DIR'] = env['CM_MLPERF_OUTPUT_DIR']
             env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_3d-unet_harness.sh')} "
 
+        elif 'dlrm' in env['CM_MODEL']:
+            env['CM_RUN_DIR'] = i['run_script_input']['path']
+            env['CM_RUN_CMD'] = f"bash {os.path.join(i['run_script_input']['path'],'run_dlrm_v2_harness.sh')} "
+
+        elif 'stable-diffusion' in env['CM_MODEL']:
+            env['CM_RUN_DIR'] = i['run_script_input']['path']
+            env['CM_RUN_CMD'] = "bash run_sdxl_harness.sh " + ("--accuracy" if env['CM_MLPERF_LOADGEN_MODE'] == "accuracy" else "")
+
         elif "gptj" in env['CM_MODEL']:
             env['CM_RUN_DIR'] = i['run_script_input']['path']
             if env.get('CM_MLPERF_INFERENCE_CODE_VERSION', '') == "v3.1":
diff --git a/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
new file mode 100644
index 0000000000..65530c621e
--- /dev/null
+++ b/script/app-mlperf-inference-intel/run_dlrm_v2_harness.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+export MODEL_DIR=${CM_ML_MODEL_FILE_WITH_PATH}
+export DATA_DIR=/mnt/dlrm_data
+
+
+NUM_SOCKETS=${CM_HOST_CPU_SOCKETS:-2}
+export NUM_SOCKETS=$NUM_SOCKETS
+export num_physical_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
+export CPUS_PER_SOCKET=$((num_physical_cores/NUM_SOCKETS))
+echo $CPUS_PER_SOCKET
+export CPUS_PER_PROCESS=24
+#${CPUS_PER_SOCKET}
+export CPUS_PER_INSTANCE=1
+export CPUS_FOR_LOADGEN=1
+export BATCH_SIZE=100
+export DNNL_MAX_CPU_ISA=AVX512_CORE_AMX
+
+export LD_PRELOAD=${CM_CONDA_LIB_PATH}/libiomp5.so
+
+export KMP_BLOCKTIME=1
+export OMP_NUM_THREADS=$CPUS_PER_INSTANCE
+export KMP_AFFINITY="granularity=fine,compact,1,0"
+export DNNL_PRIMITIVE_CACHE_CAPACITY=20971520
+export DLRM_DIR=$PWD/python/model
+#export TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD=30469645312
+
+mode="Offline"
+extra_option="--samples-per-query-offline=204800"
+
+int8_cfg="--int8-configure-dir=int8_configure.json"
+echo "Running $mode bs=$BATCH_SIZE int8 $DNNL_MAX_CPU_ISA"
+
+export CUDA_VISIBLE_DEVICES=""
+extra_option=" $extra_option --use-int8"
+export EXTRA_OPS="$extra_option"
+
+#export number_cores=`lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l`
+
+model_path="$MODEL_DIR/dlrm-multihot-pytorch.pt"
+profile=dlrm-multihot-pytorch
+cd ${CM_HARNESS_CODE_ROOT}
+OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+
+if [[ "${CM_MLPERF_LOADGEN_MODE}" == "accuracy" ]]; then
+    accuracy_opt=" --accuracy"
+else
+    accuracy_opt=""
+fi
+
+USER_CONF="${CM_MLPERF_USER_CONF}"
+cmd="python -u python/runner.py --profile $profile $common_opt --model dlrm --model-path $model_path \
+--config ${CM_MLPERF_CONF} --user-config ${CM_MLPERF_USER_CONF} \
+--dataset multihot-criteo --dataset-path $DATA_DIR --output $OUTPUT_DIR $EXTRA_OPS \
+--max-ind-range=40000000 --samples-to-aggregate-quantile-file=${PWD}/tools/dist_quantile.txt \
+--max-batchsize=$BATCH_SIZE --scenario=${CM_MLPERF_LOADGEN_SCENARIO} ${accuracy_opt}"
+
+
+echo "$cmd"
+#exit 1
+eval "$cmd"
diff --git a/script/app-mlperf-inference-intel/run_sdxl_harness.sh b/script/app-mlperf-inference-intel/run_sdxl_harness.sh
new file mode 100644
index 0000000000..3dd71ec83e
--- /dev/null
+++ b/script/app-mlperf-inference-intel/run_sdxl_harness.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+export KMP_BLOCKTIME=1
+export KMP_AFFINITY=granularity=fine,compact,1,0
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libiomp5.so
+# export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+#
+
+BATCH_SIZE=${CM_MLPERF_LOADGEN_BATCH_SIZE}
+
+# Map the optional --accuracy flag passed by customize.py to the harness mode;
+# the "Accuracy"/"Performance" mode names are an assumption about the Intel harness CLI.
+if [[ "$1" == "--accuracy" ]]; then
+    LOADGEN_MODE="Accuracy"
+else
+    LOADGEN_MODE="Performance"
+fi
+
+export num_physical_cores=$(lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l)
+num_numa=$(numactl --hardware|grep available|awk -F' ' '{ print $2 }')
+
+OUTPUT_DIR="${CM_MLPERF_OUTPUT_DIR}"
+MODEL_PATH="${SDXL_CHECKPOINT_PATH}"
+cd ${CM_HARNESS_CODE_ROOT}
+
+NUM_PROC=1
+CPUS_PER_PROC=16
+WORKERS_PER_PROC=1
+TOTAL_SAMPLE_COUNT=5000
+BATCH_SIZE=${BATCH_SIZE:-8}
+
+FD_MAX=$(ulimit -n -H)
+ulimit -n $((FD_MAX - 1))
+
+echo "Start time: $(date)"
+cmd="python -u main.py \
+    --dtype bfloat16 \
+    --device 'cpu' \
+    --scenario ${CM_MLPERF_LOADGEN_SCENARIO} \
+    --mode ${LOADGEN_MODE} \
+    --num-proc ${NUM_PROC} \
+    --cpus-per-proc ${CPUS_PER_PROC} \
+    --model-path ${MODEL_PATH} \
+    --batch-size ${BATCH_SIZE} \
+    --mlperf-conf ${CM_MLPERF_CONF} \
+    --user-conf ${CM_MLPERF_USER_CONF} \
+    --workers-per-proc ${WORKERS_PER_PROC} \
+    --total-sample-count ${TOTAL_SAMPLE_COUNT} \
+    --log-dir ${OUTPUT_DIR} "
+
+echo "$cmd"
+eval "$cmd"
+test $? -eq 0 || exit $?
+echo "End time: $(date)" + diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml index d515c9092f..9040aeb8d5 100644 --- a/script/app-mlperf-inference-nvidia/_cm.yaml +++ b/script/app-mlperf-inference-nvidia/_cm.yaml @@ -423,6 +423,9 @@ variations: - tags: get,generic-python-lib,_package.onnxruntime names: - onnxruntime + - tags: get,generic-python-lib,_package.colored + names: + - colored - tags: get,generic-python-lib,_package.nvidia-ammo names: - nvidia-ammo @@ -430,6 +433,7 @@ variations: env: CM_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: "https://pypi.nvidia.com" CM_GENERIC_PYTHON_PIP_EXTRA: "--no-cache-dir" + CM_SDXL_ACCURACY_RUN_DEVICE: " gpu" - tags: get,generic-python-lib,_package.optimum names: - optimum @@ -1015,6 +1019,10 @@ variations: group: device-memory env: CM_NVIDIA_GPU_MEMORY: "80" + gpu_memory.#: + group: device-memory + env: + CM_NVIDIA_GPU_MEMORY: "#" singlestream,resnet50: env: diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml index 4e8597b4c0..4fa21d9ba6 100644 --- a/script/app-mlperf-inference/_cm.yaml +++ b/script/app-mlperf-inference/_cm.yaml @@ -561,7 +561,6 @@ variations: CM_MLPERF_INFERENCE_TEST_QPS: "0.05" default_variations: precision: float16 - device: cuda add_deps_recursive: mlperf-inference-implementation: tags: _sdxl diff --git a/script/get-dataset-coco2014/customize.py b/script/get-dataset-coco2014/customize.py index c48f71616d..b6984bca10 100644 --- a/script/get-dataset-coco2014/customize.py +++ b/script/get-dataset-coco2014/customize.py @@ -18,7 +18,7 @@ def postprocess(i): env = i['env'] if env.get('CM_DATASET_CALIBRATION','') == "no": env['CM_DATASET_PATH_ROOT'] = os.path.join(os.getcwd(), 'install') - env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') + #env['CM_DATASET_PATH'] = os.path.join(os.getcwd(), 'install', 'validation', 'data') env['CM_DATASET_CAPTIONS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'captions') env['CM_DATASET_LATENTS_DIR_PATH'] = os.path.join(os.getcwd(), 'install', 'latents') else: diff --git a/script/get-dataset-openorca/_cm.json b/script/get-dataset-openorca/_cm.json index ad57e1e9c0..82d5f4dfdc 100644 --- a/script/get-dataset-openorca/_cm.json +++ b/script/get-dataset-openorca/_cm.json @@ -39,7 +39,6 @@ "uid": "9252c4d90d5940b7", "variations": { "60": { - "default": true, "env": { "CM_DATASET_SIZE": "60" }, @@ -58,6 +57,7 @@ "group": "dataset-type" }, "full": { + "default": true, "env": { "CM_DATASET_SIZE": "24576" }, diff --git a/script/get-docker/customize.py b/script/get-docker/customize.py index c4a99f1a23..d030a11432 100644 --- a/script/get-docker/customize.py +++ b/script/get-docker/customize.py @@ -34,7 +34,7 @@ def preprocess(i): return {'return':0} def detect_version(i): - r = i['automation'].parse_version({'match_text': r'Docker version\s*([\d.]+)', + r = i['automation'].parse_version({'match_text': r'[Docker|podman] version\s*([\d.]+)', 'group_number': 1, 'env_key':'CM_DOCKER_VERSION', 'which_env':i['env']}) diff --git a/script/get-generic-sys-util/_cm.json b/script/get-generic-sys-util/_cm.json index a427ce20fb..302810e787 100644 --- a/script/get-generic-sys-util/_cm.json +++ b/script/get-generic-sys-util/_cm.json @@ -128,6 +128,19 @@ } } }, + "libpng-dev": { + "env": { + "CM_SYS_UTIL_NAME": "libpng-dev" + }, + "state": { + "libpng-dev": { + "apt": "libpng-dev", + "dnf": "libpng-devel", + "yum": "libpng-devel", + "brew": "" + } + } + }, "screen": { "env": { "CM_SYS_UTIL_NAME": "screen" diff 
--git a/script/get-ml-model-gptj/_cm.json b/script/get-ml-model-gptj/_cm.json index d7852c4039..38629a793b 100644 --- a/script/get-ml-model-gptj/_cm.json +++ b/script/get-ml-model-gptj/_cm.json @@ -215,7 +215,7 @@ }, "deps": [ { - "tags": "get,git,repo,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604", + "tags": "get,git,repo,_lfs,_repo.https://github.com/NVIDIA/TensorRT-LLM.git,_sha.0ab9d17a59c284d2de36889832fe9fc7c8697604", "extra_cache_tags": "tensorrt-llm", "env": { "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_TENSORRT_LLM_CHECKOUT_PATH" diff --git a/script/get-mlperf-inference-results/_cm.json b/script/get-mlperf-inference-results/_cm.json index 34bae8885a..cf6e10b1b0 100644 --- a/script/get-mlperf-inference-results/_cm.json +++ b/script/get-mlperf-inference-results/_cm.json @@ -22,7 +22,7 @@ "env": { "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_MLPERF_INFERENCE_RESULTS_PATH" }, - "extra_cache_tags": "mlperf,inference,results" + "extra_cache_tags": "mlperf,inference,results,official" } ], "deps": [ diff --git a/script/get-preprocessed-dataset-criteo/_cm.json b/script/get-preprocessed-dataset-criteo/_cm.json index a5b4c4e1f0..6f12c1f9af 100644 --- a/script/get-preprocessed-dataset-criteo/_cm.json +++ b/script/get-preprocessed-dataset-criteo/_cm.json @@ -4,6 +4,9 @@ "automation_uid": "5b4e0237da074764", "category": "AI/ML datasets", "cache": true, + "docker": { + "real_run": false + }, "deps": [ { "names": [ diff --git a/script/install-diffusers-from-src/_cm.json b/script/install-diffusers-from-src/_cm.json new file mode 100644 index 0000000000..c83be299a2 --- /dev/null +++ b/script/install-diffusers-from-src/_cm.json @@ -0,0 +1,124 @@ +{ + "alias": "install-diffusers-from-src", + "automation_alias": "script", + "automation_uid": "5b4e0237da074764", + "cache": true, + "category": "Compiler automation", + "deps": [ + { + "tags": "detect,os" + }, + { + "tags": "detect,cpu" + }, + { + "names": [ + "python", + "python3" + ], + "skip_if_env": { + "CM_CONDA_ENV": [ + "yes" + ] + }, + "tags": "get,python3" + }, + { + "names": [ + "compiler" + ], + "tags": "get,compiler" + }, + { + "env": { + "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_DIFFUSERS_SRC_REPO_PATH" + }, + "extra_cache_tags": "diffusers,diffusers-src,src,diffusers-src,diffusers-src-repo", + "names": [ + "diffusers-src-repo" + ], + "tags": "get,git,repo", + "update_tags_from_env_with_prefix": { + "_branch.": [ + "CM_GIT_CHECKOUT" + ], + "_repo.": [ + "CM_GIT_URL" + ], + "_sha.": [ + "CM_GIT_CHECKOUT_SHA" + ], + "_tag.": [ + "CM_GIT_CHECKOUT_TAG" + ] + } + } + ], + "env": { + "CM_GIT_URL": "https://github.com/huggingface/diffusers.git" + }, + "name": "Build diffusers from sources", + "new_env_keys": [ + "CM_DIFFUSERS_*" + ], + "prehook_deps": [], + "sort": 1000, + "tags": [ + "install", + "get", + "src", + "from.src", + "diffusers", + "src-diffusers" + ], + "uid": "b2ddda995f63412f", + "variations": { + "branch.#": { + "env": { + "CM_GIT_CHECKOUT": "#" + } + }, + "for-intel-mlperf-inference-v4.0-sdxl": { + "base": [ + "tag.v0.25.1" + ], + "env": { + "CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH": "yes" + } + }, + "python.#": { + "env": { + "CM_PYTHON_BIN_WITH_PATH": "#" + } + }, + "repo.#": { + "env": { + "CM_GIT_URL": "#" + }, + "group": "repo" + }, + "repo.https://github.com/huggingface/diffusers": { + "default": true, + "env": { + "CM_GIT_URL": "https://github.com/huggingface/diffusers" + }, + "group": "repo" + }, + "sha.#": { + "env": { + "CM_GIT_CHECKOUT_SHA": "#" + } + }, + "tag.#": { + "ad": { + 
"diffusers-src-repo": { + "tags": "_no-recurse-submodules,_full-history" + } + }, + "env": { + "CM_GIT_CHECKOUT_TAG": "#" + } + } + }, + "versions": {} +} diff --git a/script/install-diffusers-from-src/customize.py b/script/install-diffusers-from-src/customize.py new file mode 100644 index 0000000000..0e1ca24f5c --- /dev/null +++ b/script/install-diffusers-from-src/customize.py @@ -0,0 +1,21 @@ +from cmind import utils +import os + +def preprocess(i): + + os_info = i['os_info'] + + if os_info['platform'] == 'windows': + return {'return':1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + + + automation = i['automation'] + + recursion_spaces = i['recursion_spaces'] + + return {'return':0} + +def postprocess(i): + return {'return':0} diff --git a/script/install-diffusers-from-src/run.sh b/script/install-diffusers-from-src/run.sh new file mode 100644 index 0000000000..8d5ca084a2 --- /dev/null +++ b/script/install-diffusers-from-src/run.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +CUR_DIR=$PWD +rm -rf diffusers +cp -r ${CM_DIFFUSERS_SRC_REPO_PATH} diffusers +test "${?}" -eq "0" || exit $? +cd diffusers +rm -rf build + +if [[ ${CM_INTEL_MLPERF_INFERENCE_v4_0_STABLE_DIFFUSION_PATCH} == "yes" ]]; then + wget -nc https://raw.githubusercontent.com/mlcommons/inference_results_v4.0/main/closed/Intel/code/stable-diffusion-xl/pytorch-cpu/diffusers.patch + test "${?}" -eq "0" || exit $? + git apply diffusers.patch + test "${?}" -eq "0" || exit $? +fi + +${CM_PYTHON_BIN_WITH_PATH} -m pip install . +test "${?}" -eq "0" || exit $? diff --git a/script/install-ipex-from-src/_cm.json b/script/install-ipex-from-src/_cm.json index de53756ca8..e41efea533 100644 --- a/script/install-ipex-from-src/_cm.json +++ b/script/install-ipex-from-src/_cm.json @@ -462,6 +462,94 @@ "CM_IPEX_SKIP_PYTORCH": "yes" } }, + "for-intel-mlperf-inference-sdxl": { + "alias": "for-intel-mlperf-inference-v4.0-sdxl" + }, + "for-intel-mlperf-inference-v4.0-sdxl": { + "adr": { + "conda-package": { + "tags": "_name.sdxl-pt" + }, + "pytorch": { + "tags": "_for-intel-mlperf-inference-sdxl" + } + }, + "base": [ + "sha.f27c8d42a734ae0805de2bd0d8396ce205638329" + ], + "deps": [ + { + "names": [ + "conda" + ], + "tags": "get,conda,_name.sdxl-pt" + }, + { + "names": [ + "conda-package", + "python3" + ], + "tags": "get,generic,conda-package,_package.python", + "version": "3.9" + }, + { + "tags": "get,generic-python-lib,_package.torch,_path.https://download.pytorch.org/whl/nightly/cpu/torch-2.3.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl", + "names": [ + "pip-package", + "pip-torch" + ] + }, + { + "tags": "get,generic-python-lib,_package.torchvision,_path.https://download.pytorch.org/whl/nightly/cpu/torchvision-0.18.0.dev20231214%2Bcpu-cp39-cp39-linux_x86_64.whl", + "names": [ + "pip-package", + "pip-torchvision" + ] + }, + { + "names": [ + "conda-package", + "wheel" + ], + "tags": "get,generic,conda-package,_package.wheel,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "setuptools" + ], + "version": "69.5.1", + "tags": "get,generic,conda-package,_package.setuptools,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "typing-extensions" + ], + "tags": "get,generic,conda-package,_package.typing-extensions,_source.conda-forge" + }, + { + "names": [ + "conda-package", + "sympy" + ], + "tags": "get,generic,conda-package,_package.sympy,_source.conda-forge" + }, + { + "tags": "get,cmake", + "version_max": "3.26.4" + }, + { + "tags": "get,gcc", + "version_max": "12.3", + "version_max_usable": "12.3" + } + ], 
+ "env": { + "CM_CONDA_ENV": "yes", + "CM_IPEX_SKIP_PYTORCH": "yes" + } + }, "repo.#": { "env": { "CM_GIT_URL": "#" diff --git a/script/install-pytorch-from-src/_cm.json b/script/install-pytorch-from-src/_cm.json index 2f3440db30..9beb7e8c33 100644 --- a/script/install-pytorch-from-src/_cm.json +++ b/script/install-pytorch-from-src/_cm.json @@ -386,7 +386,15 @@ { "tags": "get,cmake", "version_min": "3.25.0" - } + }, + { + "tags": "get,generic-python-lib,_package.numpy", + "version": "1.22.4" + }, + { + "tags": "get,generic-python-lib,_package.networkx", + "version": "3.1" + } ], "ad": { "pytorch-src-repo": { diff --git a/script/install-torchvision-from-src/_cm.json b/script/install-torchvision-from-src/_cm.json index 50aa95dee8..c608dc2469 100644 --- a/script/install-torchvision-from-src/_cm.json +++ b/script/install-torchvision-from-src/_cm.json @@ -27,6 +27,14 @@ "tags": "get,compiler", "names": [ "compiler" ] }, + { + "tags": "get,generic-sys-util,_libpng-dev", + "enable_if_env": { + "CM_TORCHVISION_NEEDS_PNG": [ + "yes" + ] + } + }, { "env": { "CM_GIT_CHECKOUT_PATH_ENV_NAME": "CM_PYTORCH_VISION_SRC_REPO_PATH" diff --git a/script/install-torchvision-from-src/customize.py b/script/install-torchvision-from-src/customize.py index a4963cca9a..895731add7 100644 --- a/script/install-torchvision-from-src/customize.py +++ b/script/install-torchvision-from-src/customize.py @@ -10,12 +10,6 @@ def preprocess(i): env = i['env'] - if env.get('CM_MLPERF_INFERENCE_INTEL', '') == "yes": - i['run_script_input']['script_name'] = "run-intel-mlperf-inference-v3_1" - run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . " - - env['CM_RUN_CMD'] = run_cmd - automation = i['automation'] recursion_spaces = i['recursion_spaces'] diff --git a/script/process-mlperf-accuracy/customize.py b/script/process-mlperf-accuracy/customize.py index c4d9025c62..0a97c08876 100644 --- a/script/process-mlperf-accuracy/customize.py +++ b/script/process-mlperf-accuracy/customize.py @@ -90,16 +90,23 @@ def preprocess(i): elif dataset == "coco2014": env['+PYTHONPATH'] = [ os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools") ] extra_options = "" + if env.get('CM_SDXL_STATISTICS_FILE_PATH', '') != '': extra_options += f" --statistics-path '{env['CM_SDXL_STATISTICS_FILE_PATH']}' " + if env.get('CM_SDXL_COMPLIANCE_IMAGES_PATH', '') != '': extra_options += f" --compliance-images-path '{env['CM_SDXL_COMPLIANCE_IMAGES_PATH']}' " + elif not os.path.exists(os.path.join(result_dir, "images")): + extra_options += f" --compliance-images-path {os.path.join(result_dir, 'images')} " + if env.get('CM_SDXL_ACCURACY_RUN_DEVICE', '') != '': extra_options += f" --device '{env['CM_SDXL_ACCURACY_RUN_DEVICE']}' " + + #env['DATASET_ANNOTATIONS_FILE_PATH'] = env['CM_DATASET_ANNOTATIONS_FILE_PATH'] CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "tools", "accuracy_coco.py") + "' --mlperf-accuracy-file '" + os.path.join(result_dir, "mlperf_log_accuracy.json") + \ - "' --caption-path '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "coco2014", "captions", "captions_source.tsv") + "' > '" + out_file + "'" + "' --caption-path '" + os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "text_to_image", "coco2014", "captions", "captions_source.tsv") + "'" + extra_options + " > '" + out_file + "'" elif dataset == "kits19": CMD = env['CM_PYTHON_BIN_WITH_PATH'] + " '" + os.path.join(env['CM_MLPERF_INFERENCE_3DUNET_PATH'], diff --git 
a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index 90ebd6bb12..1746280267 100644 --- a/script/run-docker-container/customize.py +++ b/script/run-docker-container/customize.py @@ -194,6 +194,7 @@ def postprocess(i): x1 = '' x2 = '' + run_cmd_prefix = "" if env.get('CM_DOCKER_INTERACTIVE_MODE', '') in ['yes', 'True', True]: run_cmd_prefix = "(" x1 = '-it' diff --git a/script/run-mlperf-inference-app/_cm.yaml b/script/run-mlperf-inference-app/_cm.yaml index 7cce0b8ded..ffbe68dcde 100644 --- a/script/run-mlperf-inference-app/_cm.yaml +++ b/script/run-mlperf-inference-app/_cm.yaml @@ -53,6 +53,7 @@ input_mapping: save_console_log: CM_SAVE_CONSOLE_LOG execution_mode: CM_MLPERF_RUN_STYLE find_performance: CM_MLPERF_FIND_PERFORMANCE_MODE + framework: CM_MLPERF_BACKEND gpu_name: CM_NVIDIA_GPU_NAME hw_name: CM_HW_NAME hw_notes_extra: CM_MLPERF_SUT_SW_NOTES_EXTRA