fix local inference config

commit 38b364caeb (parent 3d16bd94b4)
Author: geoffsee
Date:   2025-06-18 12:38:38 -04:00

@@ -21,8 +21,8 @@ configure_dev_vars() {
     echo "OLLAMA_API_KEY=active" >> "${DEV_VARS_PATH}"
   fi
   if [[ "$endpoint_url" == *"10240"* ]]; then
-      echo "MLX_API_KEY=active" >> "${ENV_LOCAL_PATH}"
-      echo "MLX_API_KEY=active" >> "${DEV_VARS_PATH}"
+    echo "MLX_API_KEY=active" >> "${ENV_LOCAL_PATH}"
+    echo "MLX_API_KEY=active" >> "${DEV_VARS_PATH}"
   fi
   echo "Local inference is configured for $endpoint_url"
@@ -39,8 +39,7 @@ echo "Checking for local inference services..."
 if nc -z -w1 localhost 11434 >/dev/null 2>&1; then
   echo "Ollama service detected on port 11434."
   configure_dev_vars "http://localhost:11434"
-# check for mlx-omni-server
-if nc -z -w1 localhost 10240 >/dev/null 2>&1; then
+elif nc -z -w1 localhost 10240 >/dev/null 2>&1; then
   echo "mlx-omni-server service detected on port 10240."
   configure_dev_vars "http://localhost:10240"
 else
@@ -49,4 +48,4 @@ else
   echo ".dev.vars was not modified by this script for OpenAI local inference settings."
 fi
-echo "Script finished."
\ No newline at end of file
+echo "Script finished."