#!/usr/bin/env bash
# Launch stack: Caddy reverse proxy + Nitro (llama.cpp) inference server.
# After a startup delay, asks Nitro over its local HTTP API to load the
# AquilaChat2 GGUF model. Requires Intel oneAPI runtime env for ./nitro.
set -euo pipefail

# Reverse proxy in the background.
caddy run &

# Give the nitro server time to come up, then request the model load.
# NOTE(review): a fixed 10s sleep is fragile — polling the health endpoint
# would be more robust, but matches the original behavior.
(
  sleep 10
  curl -s http://localhost:3928/inferences/llamacpp/loadmodel \
    -H 'Content-Type: application/json' \
    -d '{ "llama_model_path": "./AquilaChat2-7B-16K.Q3_K_L.gguf", "ctx_len": 16384 }'
) &

# Bug fix: the original line ran `source setvars.sh ./nitro`, which passes
# ./nitro as an ARGUMENT to setvars.sh instead of executing it — the server
# never started. Source the oneAPI environment first, then run nitro in the
# foreground so the script's lifetime tracks the server's.
source /opt/intel/oneapi/setvars.sh
./nitro