Spark support

#7
by everphilski

I was able to get it working with this systemd service:

ExecStart=/usr/bin/docker run --rm --gpus all \
    --name vllm-coder-agent \
    -p 8100:8000 \
    -v /home/user/.cache/huggingface:/root/.cache/huggingface \
    -v /home/user/.cache/vllm:/root/.cache/vllm \
    --ipc=host \
    vllm/vllm-openai:v0.17.1-cu130 \
    --model GadflyII/Qwen3-Coder-Next-NVFP4 \
    --gpu-memory-utilization 0.65 \
    --max-model-len 65536 \
    --kv-cache-dtype fp8 \
    --enforce-eager \
    --enable-auto-tool-choice \
    --tool-call-parser hermes
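
For context, that ExecStart sits inside an ordinary unit file; a minimal sketch of the rest (the unit description, the ExecStartPre cleanup, and the restart policy are my assumptions, not part of the original setup):

[Unit]
Description=vLLM server for Qwen3-Coder-Next-NVFP4
After=docker.service network-online.target
Requires=docker.service

[Service]
# remove any stale container from a previous run ("-" means ignore failure)
ExecStartPre=-/usr/bin/docker rm -f vllm-coder-agent
# the full docker run command shown above goes here
ExecStart=/usr/bin/docker run --rm --gpus all ...
ExecStop=/usr/bin/docker stop vllm-coder-agent
Restart=on-failure

[Install]
WantedBy=multi-user.target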

However, it won't start without --enforce-eager (i.e., with CUDA graphs enabled), so I'm capped at roughly 10 tok/s. Any ideas?
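
A rough way to sanity-check that number: time a single request and read the usage block back. The port and model name match the unit above; the prompt and token count are just a sketch:

time curl -s http://localhost:8100/v1/completions \
    -H 'Content-Type: application/json' \
    -d '{"model": "GadflyII/Qwen3-Coder-Next-NVFP4", "prompt": "def fib(n):", "max_tokens": 256}' \
    | python3 -c 'import json, sys; print(json.load(sys.stdin)["usage"])'

completion_tokens divided by the wall-clock time from time gives the tok/s figure.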

This is from a working Docker Compose setup I've been playing around with:

services:
  vllm-spark-pro:
    image: nvcr.io/nvidia/vllm:26.03-py3
    container_name: vllm-spark-pro
    runtime: nvidia
    ipc: host
    deploy:
      resources:
        limits:
          memory: 100G
    ports:
      - "8000:8000"
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - HF_TOKEN=${HF_TOKEN}
      - VLLM_USE_FLASHINFER_MOE_FP4=0
      - VLLM_NVFP4_GEMM_BACKEND=marlin  # forces the stable, fast Marlin GEMM backend
      - VLLM_TEST_FORCE_FP8_MARLIN=1    # routes the FP8 KV cache through the same stable path
      - VLLM_MARLIN_USE_ATOMIC_ADD=1    # speeds up the summing (reduction) of huge attention matrices during 100k+ token context reads
    command:
      - "vllm"
      - "serve"
      - "GadflyII/Qwen3-Coder-Next-NVFP4"
      - "--max-model-len"
      - "262144"
      #- "--chat-template"
      #- "/app/data/pastebin_quen_template.jinja"
      - "--max-num-batched-tokens"
      - "65536"
      - "--gpu-memory-utilization"
      - "0.85"
      - "--kv-cache-dtype"
      - "fp8"
      - "--enable-prefix-caching" # CRITICAL for coding models (fast multi-turn responses)
      - "--tool-call-parser"
      - "qwen3_xml"
      - "--enable-auto-tool-choice"
      - "--max-num-seqs"
      - "4"
    restart: unless-stopped
    volumes:
      - hf-cache:/root/.cache/huggingface
      - ./:/app/data
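
Once it's up, a quick smoke test of the tool-calling path. The list_files function here is a made-up example; the port, model name, and parser settings come from the compose file above:

curl -s http://localhost:8000/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -d '{
      "model": "GadflyII/Qwen3-Coder-Next-NVFP4",
      "messages": [{"role": "user", "content": "List the files in /tmp"}],
      "tools": [{
        "type": "function",
        "function": {
          "name": "list_files",
          "description": "List files in a directory",
          "parameters": {
            "type": "object",
            "properties": {"path": {"type": "string"}},
            "required": ["path"]
          }
        }
      }]
    }'

With --enable-auto-tool-choice and --tool-call-parser qwen3_xml, the tool invocation should come back parsed under choices[0].message.tool_calls instead of as raw text in the message content.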
