Skip to content

Commit

Permalink
cdx1 mlx
Browse files Browse the repository at this point in the history
Signed-off-by: Prabhu Subramanian <prabhu@appthreat.com>
  • Loading branch information
prabhu committed Feb 7, 2025
1 parent 3f113b5 commit 8689fbc
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 5 deletions.
13 changes: 8 additions & 5 deletions contrib/fine-tuning/fine-tune-mlx.sh
Original file line number Diff line number Diff line change
Expand Up @@ -32,25 +32,28 @@ mlx_lm.lora --model ${BASE_MODEL_MLX} --train --data dataset --fine-tune-type do

echo "Fuse model to ${FUSED_MODEL} using the cdx1 adapters"
rm -rf ${FUSED_MODEL} ${FUSED_GGUF_MODEL}
mlx_lm.fuse --model ${BASE_MODEL_MLX} --adapter-path adapters --hf-path ${FUSED_MODEL} --save-path ${FUSED_MODEL} --upload-repo ${FUSED_MODEL}
mlx_lm.fuse --model ${BASE_MODEL_MLX} --adapter-path adapters --hf-path ${FUSED_MODEL} --save-path ${FUSED_MODEL} --export-gguf --gguf-path cdx1-bf16.gguf

mkdir -p ${FUSED_GGUF_MODEL}
mv ${FUSED_MODEL}/cdx1-bf16.gguf ${FUSED_GGUF_MODEL}

echo "Test fused model with the prompt 'Tell me about cdxgen'. Must yield a better response."
mlx_lm.generate --model ${FUSED_MODEL} --prompt "Tell me about cdxgen" --temp 0.05

echo "Create quantized models"
rm -rf ${QUANT_MODEL_8BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_8BIT} -q --q-bits 8 --dtype bfloat16 --upload-repo ${QUANT_MODEL_8BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_8BIT} -q --q-bits 8 --dtype bfloat16
echo "Test ${QUANT_MODEL_8BIT} with the prompt 'Tell me about cdxgen'. Must yield a better response."
mlx_lm.generate --model ${QUANT_MODEL_8BIT} --prompt "Tell me about cdxgen" --temp 0.05

rm -rf ${QUANT_MODEL_6BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_6BIT} -q --q-bits 6 --dtype bfloat16 --upload-repo ${QUANT_MODEL_6BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_6BIT} -q --q-bits 6 --dtype bfloat16
echo "Test ${QUANT_MODEL_6BIT} with the prompt 'Tell me about cdxgen'. Must yield a better response."
mlx_lm.generate --model ${QUANT_MODEL_6BIT} --prompt "Tell me about cdxgen" --temp 0.05

rm -rf ${QUANT_MODEL_4BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_4BIT} -q --q-bits 4 --dtype bfloat16 --upload-repo ${QUANT_MODEL_4BIT}
mlx_lm.convert --hf-path ${FUSED_MODEL} --mlx-path ${QUANT_MODEL_4BIT} -q --q-bits 4 --dtype bfloat16
echo "Test ${QUANT_MODEL_4BIT} with the prompt 'Tell me about cdxgen'. Must yield a better response."
mlx_lm.generate --model ${QUANT_MODEL_4BIT} --prompt "Tell me about cdxgen" --temp 0.05

rm -rf dataset adapters
rm -rf dataset adapters ${BASE_MODEL}
22 changes: 22 additions & 0 deletions contrib/fine-tuning/upload-hf.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Upload the fine-tuned cdx1 datasets and model variants to the Hugging Face
# Hub. Companion to fine-tune-mlx.sh, which produces the local directories
# (./CycloneDX/cdx1-mlx, ./CycloneDX/cdx1-gguf, and the quantized variants)
# that this script pushes to same-named Hub repositories.
#
# Requirements:
#   - huggingface-cli with an authenticated session (checked via `whoami`).
#   - The local output directories listed above must exist.
#
# -e: abort on error; -u: error on unset variables; pipefail: fail a pipeline
# if any stage fails.
set -euo pipefail

# Enable the hf_transfer backend for faster large-file uploads.
export HF_HUB_ENABLE_HF_TRANSFER=1

# Repository naming scheme: <org>/<base-model>-<tool>[-<N>bit].
HF_ORG=CycloneDX
TUNING_TOOL=mlx
TOOL_BASE_MODEL=cdx1
FUSED_MODEL=${HF_ORG}/${TOOL_BASE_MODEL}-${TUNING_TOOL}
FUSED_GGUF_MODEL=${HF_ORG}/${TOOL_BASE_MODEL}-gguf
QUANT_MODEL_8BIT=${HF_ORG}/${TOOL_BASE_MODEL}-${TUNING_TOOL}-8bit
QUANT_MODEL_6BIT=${HF_ORG}/${TOOL_BASE_MODEL}-${TUNING_TOOL}-6bit
QUANT_MODEL_4BIT=${HF_ORG}/${TOOL_BASE_MODEL}-${TUNING_TOOL}-4bit

# Fail early (via set -e) if no Hub session is configured.
huggingface-cli whoami

# Datasets: cdxgen documentation and guides, each into its own path of the
# CycloneDX/cdx-docs dataset repo.
huggingface-cli upload --repo-type dataset CycloneDX/cdx-docs ./cdxgen-docs cdxgen-docs
huggingface-cli upload --repo-type dataset CycloneDX/cdx-docs ./guides guides

# Models: fused MLX model, GGUF export, and the 8/6/4-bit quantized variants.
# Local directory layout mirrors the repo id, hence "./${REPO}" as the source
# and "." as the destination path inside the repo. Expansions are quoted to be
# safe against word-splitting (SC2086).
huggingface-cli upload --repo-type model "${FUSED_MODEL}" "./${FUSED_MODEL}" .
huggingface-cli upload --repo-type model "${FUSED_GGUF_MODEL}" "./${FUSED_GGUF_MODEL}" .
huggingface-cli upload --repo-type model "${QUANT_MODEL_8BIT}" "./${QUANT_MODEL_8BIT}" .
huggingface-cli upload --repo-type model "${QUANT_MODEL_6BIT}" "./${QUANT_MODEL_6BIT}" .
huggingface-cli upload --repo-type model "${QUANT_MODEL_4BIT}" "./${QUANT_MODEL_4BIT}" .

0 comments on commit 8689fbc

Please sign in to comment.