diff --git a/packages/scripts/configure_local_inference.sh b/packages/scripts/configure_local_inference.sh
index 3e061da..f4c725d 100755
--- a/packages/scripts/configure_local_inference.sh
+++ b/packages/scripts/configure_local_inference.sh
@@ -21,8 +21,8 @@ configure_dev_vars() {
     echo "OLLAMA_API_KEY=active" >> "${DEV_VARS_PATH}"
   fi
   if [[ "$endpoint_url" == *"10240"* ]]; then
-    echo "MLX_API_KEY=active" >> "${ENV_LOCAL_PATH}"
-    echo "MLX_API_KEY=active" >> "${DEV_VARS_PATH}"
+    echo "MLX_API_KEY=active" >> "${ENV_LOCAL_PATH}"
+    echo "MLX_API_KEY=active" >> "${DEV_VARS_PATH}"
   fi
 
   echo "Local inference is configured for $endpoint_url"
@@ -39,8 +39,7 @@ echo "Checking for local inference services..."
 if nc -z -w1 localhost 11434 >/dev/null 2>&1; then
   echo "Ollama service detected on port 11434."
   configure_dev_vars "http://localhost:11434"
-# check for mlx-omni-server
-if nc -z -w1 localhost 10240 >/dev/null 2>&1; then
+elif nc -z -w1 localhost 10240 >/dev/null 2>&1; then
   echo "mlx-omni-server service detected on port 10240."
   configure_dev_vars "http://localhost:10240"
 else
@@ -49,4 +48,4 @@ else
   echo ".dev.vars was not modified by this script for OpenAI local inference settings."
 fi
 
-echo "Script finished."
+echo "Script finished."
\ No newline at end of file
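
The behavioral fix in the second hunk is replacing the nested `if` for port 10240 with an `elif`, making the two service checks mutually exclusive so the `else` branch runs only when neither port answers. A minimal sketch of the resulting detection flow, reconstructed from the hunk context; the first line of the `else` branch and the rest of the script are not visible in the diff and are assumed here for illustration only.

#!/usr/bin/env bash
# Sketch only: post-patch detection flow inferred from the diff context above.
if nc -z -w1 localhost 11434 >/dev/null 2>&1; then
  echo "Ollama service detected on port 11434."
  configure_dev_vars "http://localhost:11434"
elif nc -z -w1 localhost 10240 >/dev/null 2>&1; then
  echo "mlx-omni-server service detected on port 10240."
  configure_dev_vars "http://localhost:10240"
else
  # (earlier else-branch output not shown in the hunk)
  echo ".dev.vars was not modified by this script for OpenAI local inference settings."
fi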