Dev containers native implementation (#52338)

KyleBarton created

## Context

Closes #11473

In-house Zed implementation of devcontainers. Replaces the dependency on
the [reference implementation](https://github.com/devcontainers/cli) via
Node.

This enables additional features with this implementation:
1. Zed extensions can be specified in the `customizations` block, via
this syntax in `devcontainer.json`:
```
...
  "customizations": {
    "zed": {
      "extensions": ["vue", "ruby"],
    },
  },

```
2. [forwardPorts](https://containers.dev/implementors/json_reference/#general-properties)
are supported, with multiple ports proxied to the host

## How to Review

<!-- Help reviewers focus their attention:
- For small PRs: note what to focus on (e.g., "error handling in
foo.rs")
- For large PRs (>400 LOC): provide a guided tour — numbered list of
files/commits to read in order. (The `large-pr` label is applied
automatically.)
     - See the review process guidelines for comment conventions -->

## Self-Review Checklist

<!-- Check before requesting review: -->
- [x] I've reviewed my own diff for quality, security, and reliability
- [x] Unsafe blocks (if any) have justifying comments
- [x] The content is consistent with the [UI/UX
checklist](https://github.com/zed-industries/zed/blob/main/CONTRIBUTING.md#uiux-checklist)
- [x] Tests cover the new/changed behavior
- [x] Performance impact has been considered and is acceptable

Release Notes:

- Improved devcontainer implementation by moving initialization and
creation in-house

Change summary

Cargo.lock                                        |    8 
crates/dev_container/Cargo.toml                   |   13 
crates/dev_container/src/command_json.rs          |   64 
crates/dev_container/src/devcontainer_api.rs      |  586 -
crates/dev_container/src/devcontainer_json.rs     | 1358 +++++
crates/dev_container/src/devcontainer_manifest.rs | 4566 +++++++++++++++++
crates/dev_container/src/docker.rs                |  463 +
crates/dev_container/src/features.rs              |  254 
crates/dev_container/src/lib.rs                   |  413 -
crates/dev_container/src/oci.rs                   |  470 +
crates/recent_projects/src/recent_projects.rs     |    9 
crates/recent_projects/src/remote_connections.rs  |    1 
crates/recent_projects/src/remote_servers.rs      |   23 
crates/remote/src/transport/docker.rs             |   22 
crates/settings_content/src/settings_content.rs   |    5 
crates/util/src/command.rs                        |    8 
crates/util/src/command/darwin.rs                 |    8 
crates/workspace/src/persistence.rs               |   27 
18 files changed, 7,636 insertions(+), 662 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -4729,6 +4729,9 @@ dependencies = [
 name = "dev_container"
 version = "0.1.0"
 dependencies = [
+ "async-tar",
+ "async-trait",
+ "env_logger 0.11.8",
  "fs",
  "futures 0.3.31",
  "gpui",
@@ -4736,16 +4739,17 @@ dependencies = [
  "http_client",
  "log",
  "menu",
- "node_runtime",
  "paths",
  "picker",
  "project",
  "serde",
  "serde_json",
+ "serde_json_lenient",
  "settings",
- "smol",
+ "shlex",
  "ui",
  "util",
+ "walkdir",
  "workspace",
  "worktree",
 ]

crates/dev_container/Cargo.toml 🔗

@@ -5,21 +5,26 @@ publish.workspace = true
 edition.workspace = true
 
 [dependencies]
+async-tar.workspace = true
+async-trait.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+serde_json_lenient.workspace = true
+shlex.workspace = true
 http_client.workspace = true
 http.workspace = true
 gpui.workspace = true
+fs.workspace = true
 futures.workspace = true
 log.workspace = true
-node_runtime.workspace = true
 menu.workspace = true
 paths.workspace = true
 picker.workspace = true
+project.workspace = true
 settings.workspace = true
-smol.workspace = true
 ui.workspace = true
 util.workspace = true
+walkdir.workspace = true
 worktree.workspace = true
 workspace.workspace = true
 
@@ -32,6 +37,8 @@ settings = { workspace = true, features = ["test-support"] }
 
 workspace = { workspace = true, features = ["test-support"] }
 worktree = { workspace = true, features = ["test-support"] }
+util = { workspace = true, features = ["test-support"] }
+env_logger.workspace = true
 
 [lints]
-workspace = true
+workspace = true

crates/dev_container/src/command_json.rs 🔗

@@ -0,0 +1,64 @@
+use std::process::Output;
+
+use async_trait::async_trait;
+use serde::Deserialize;
+use util::command::Command;
+
+use crate::devcontainer_api::DevContainerError;
+
+pub(crate) struct DefaultCommandRunner;
+
+impl DefaultCommandRunner {
+    pub(crate) fn new() -> Self {
+        Self
+    }
+}
+
+#[async_trait]
+impl CommandRunner for DefaultCommandRunner {
+    async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
+        command.output().await
+    }
+}
+
+#[async_trait]
+pub(crate) trait CommandRunner: Send + Sync {
+    async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error>;
+}
+
+pub(crate) async fn evaluate_json_command<T>(
+    mut command: Command,
+) -> Result<Option<T>, DevContainerError>
+where
+    T: for<'de> Deserialize<'de>,
+{
+    let output = command.output().await.map_err(|e| {
+        log::error!("Error running command {:?}: {e}", command);
+        DevContainerError::CommandFailed(command.get_program().display().to_string())
+    })?;
+
+    deserialize_json_output(output).map_err(|e| {
+        log::error!("Error running command {:?}: {e}", command);
+        DevContainerError::CommandFailed(command.get_program().display().to_string())
+    })
+}
+
+pub(crate) fn deserialize_json_output<T>(output: Output) -> Result<Option<T>, String>
+where
+    T: for<'de> Deserialize<'de>,
+{
+    if output.status.success() {
+        let raw = String::from_utf8_lossy(&output.stdout);
+        if raw.is_empty() || raw.trim() == "[]" || raw.trim() == "{}" {
+            return Ok(None);
+        }
+        let value = serde_json_lenient::from_str(&raw)
+            .map_err(|e| format!("Error deserializing from raw json: {e}"));
+        value
+    } else {
+        let std_err = String::from_utf8_lossy(&output.stderr);
+        Err(format!(
+            "Sent non-successful output; cannot deserialize. StdErr: {std_err}"
+        ))
+    }
+}

crates/dev_container/src/devcontainer_api.rs 🔗

@@ -2,18 +2,26 @@ use std::{
     collections::{HashMap, HashSet},
     fmt::Display,
     path::{Path, PathBuf},
+    sync::Arc,
 };
 
-use node_runtime::NodeRuntime;
+use futures::TryFutureExt;
+use gpui::{AsyncWindowContext, Entity};
+use project::Worktree;
 use serde::Deserialize;
-use settings::DevContainerConnection;
-use smol::fs;
-use util::command::Command;
+use settings::{DevContainerConnection, infer_json_indent_size, replace_value_in_json_text};
 use util::rel_path::RelPath;
+use walkdir::WalkDir;
 use workspace::Workspace;
 use worktree::Snapshot;
 
-use crate::{DevContainerContext, DevContainerFeature, DevContainerTemplate};
+use crate::{
+    DevContainerContext, DevContainerFeature, DevContainerTemplate,
+    devcontainer_json::DevContainer,
+    devcontainer_manifest::{read_devcontainer_configuration, spawn_dev_container},
+    devcontainer_templates_repository, get_latest_oci_manifest, get_oci_token, ghcr_registry,
+    oci::download_oci_tarball,
+};
 
 /// Represents a discovered devcontainer configuration
 #[derive(Debug, Clone, PartialEq, Eq)]
@@ -42,63 +50,33 @@ impl DevContainerConfig {
 
 #[derive(Debug, Deserialize)]
 #[serde(rename_all = "camelCase")]
-struct DevContainerUp {
-    _outcome: String,
-    container_id: String,
-    remote_user: String,
-    remote_workspace_folder: String,
+pub(crate) struct DevContainerUp {
+    pub(crate) container_id: String,
+    pub(crate) remote_user: String,
+    pub(crate) remote_workspace_folder: String,
+    #[serde(default)]
+    pub(crate) extension_ids: Vec<String>,
+    #[serde(default)]
+    pub(crate) remote_env: HashMap<String, String>,
 }
 
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[derive(Debug)]
 pub(crate) struct DevContainerApply {
-    pub(crate) files: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub(crate) struct DevContainerConfiguration {
-    name: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-pub(crate) struct DevContainerConfigurationOutput {
-    configuration: DevContainerConfiguration,
-}
-
-pub(crate) struct DevContainerCli {
-    pub path: PathBuf,
-    node_runtime_path: Option<PathBuf>,
-}
-
-impl DevContainerCli {
-    fn command(&self, use_podman: bool) -> Command {
-        let mut command = if let Some(node_runtime_path) = &self.node_runtime_path {
-            let mut command =
-                util::command::new_command(node_runtime_path.as_os_str().display().to_string());
-            command.arg(self.path.display().to_string());
-            command
-        } else {
-            util::command::new_command(self.path.display().to_string())
-        };
-
-        if use_podman {
-            command.arg("--docker-path");
-            command.arg("podman");
-        }
-        command
-    }
+    pub(crate) project_files: Vec<Arc<RelPath>>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum DevContainerError {
+    CommandFailed(String),
     DockerNotAvailable,
-    DevContainerCliNotAvailable,
+    ContainerNotValid(String),
     DevContainerTemplateApplyFailed(String),
+    DevContainerScriptsFailed,
     DevContainerUpFailed(String),
     DevContainerNotFound,
     DevContainerParseFailed,
-    NodeRuntimeNotAvailable,
+    FilesystemError,
+    ResourceFetchFailed,
     NotInValidProject,
 }
 
@@ -110,8 +88,11 @@ impl Display for DevContainerError {
             match self {
                 DevContainerError::DockerNotAvailable =>
                     "docker CLI not found on $PATH".to_string(),
-                DevContainerError::DevContainerCliNotAvailable =>
-                    "devcontainer CLI not found on path".to_string(),
+                DevContainerError::ContainerNotValid(id) => format!(
+                    "docker image {id} did not have expected configuration for a dev container"
+                ),
+                DevContainerError::DevContainerScriptsFailed =>
+                    "lifecycle scripts could not execute for dev container".to_string(),
                 DevContainerError::DevContainerUpFailed(_) => {
                     "DevContainer creation failed".to_string()
                 }
@@ -122,14 +103,32 @@ impl Display for DevContainerError {
                     "No valid dev container definition found in project".to_string(),
                 DevContainerError::DevContainerParseFailed =>
                     "Failed to parse file .devcontainer/devcontainer.json".to_string(),
-                DevContainerError::NodeRuntimeNotAvailable =>
-                    "Cannot find a valid node runtime".to_string(),
                 DevContainerError::NotInValidProject => "Not within a valid project".to_string(),
+                DevContainerError::CommandFailed(program) =>
+                    format!("Failure running external program {program}"),
+                DevContainerError::FilesystemError =>
+                    "Error downloading resources locally".to_string(),
+                DevContainerError::ResourceFetchFailed =>
+                    "Failed to fetch resources from template or feature repository".to_string(),
             }
         )
     }
 }
 
+pub(crate) async fn read_default_devcontainer_configuration(
+    cx: &DevContainerContext,
+    environment: HashMap<String, String>,
+) -> Result<DevContainer, DevContainerError> {
+    let default_config = DevContainerConfig::default_config();
+
+    read_devcontainer_configuration(default_config, cx, environment)
+        .await
+        .map_err(|e| {
+            log::error!("Default configuration not found: {:?}", e);
+            DevContainerError::DevContainerNotFound
+        })
+}
+
 /// Finds all available devcontainer configurations in the project.
 ///
 /// See [`find_configs_in_snapshot`] for the locations that are scanned.
@@ -241,27 +240,35 @@ pub fn find_configs_in_snapshot(snapshot: &Snapshot) -> Vec<DevContainerConfig>
 pub async fn start_dev_container_with_config(
     context: DevContainerContext,
     config: Option<DevContainerConfig>,
+    environment: HashMap<String, String>,
 ) -> Result<(DevContainerConnection, String), DevContainerError> {
     check_for_docker(context.use_podman).await?;
-    let cli = ensure_devcontainer_cli(&context.node_runtime).await?;
-    let config_path = config.map(|c| context.project_directory.join(&c.config_path));
 
-    match devcontainer_up(&context, &cli, config_path.as_deref()).await {
+    let Some(actual_config) = config.clone() else {
+        return Err(DevContainerError::NotInValidProject);
+    };
+
+    match spawn_dev_container(
+        &context,
+        environment.clone(),
+        actual_config.clone(),
+        context.project_directory.clone().as_ref(),
+    )
+    .await
+    {
         Ok(DevContainerUp {
             container_id,
             remote_workspace_folder,
             remote_user,
+            extension_ids,
+            remote_env,
             ..
         }) => {
             let project_name =
-                match read_devcontainer_configuration(&context, &cli, config_path.as_deref()).await
-                {
-                    Ok(DevContainerConfigurationOutput {
-                        configuration:
-                            DevContainerConfiguration {
-                                name: Some(project_name),
-                            },
-                    }) => project_name,
+                match read_devcontainer_configuration(actual_config, &context, environment).await {
+                    Ok(DevContainer {
+                        name: Some(name), ..
+                    }) => name,
                     _ => get_backup_project_name(&remote_workspace_folder, &container_id),
                 };
 
@@ -270,31 +277,19 @@ pub async fn start_dev_container_with_config(
                 container_id,
                 use_podman: context.use_podman,
                 remote_user,
+                extension_ids,
+                remote_env: remote_env.into_iter().collect(),
             };
 
             Ok((connection, remote_workspace_folder))
         }
         Err(err) => {
-            let message = format!("Failed with nested error: {}", err);
+            let message = format!("Failed with nested error: {:?}", err);
             Err(DevContainerError::DevContainerUpFailed(message))
         }
     }
 }
 
-#[cfg(not(target_os = "windows"))]
-fn dev_container_cli() -> String {
-    "devcontainer".to_string()
-}
-
-#[cfg(target_os = "windows")]
-fn dev_container_cli() -> String {
-    "devcontainer.cmd".to_string()
-}
-
-fn dev_container_script() -> String {
-    "devcontainer.js".to_string()
-}
-
 async fn check_for_docker(use_podman: bool) -> Result<(), DevContainerError> {
     let mut command = if use_podman {
         util::command::new_command("podman")
@@ -312,261 +307,157 @@ async fn check_for_docker(use_podman: bool) -> Result<(), DevContainerError> {
     }
 }
 
-pub(crate) async fn ensure_devcontainer_cli(
-    node_runtime: &NodeRuntime,
-) -> Result<DevContainerCli, DevContainerError> {
-    let mut command = util::command::new_command(&dev_container_cli());
-    command.arg("--version");
-
-    if let Err(e) = command.output().await {
-        log::error!(
-            "Unable to find devcontainer CLI in $PATH. Checking for a zed installed version. Error: {:?}",
-            e
-        );
-
-        let Ok(node_runtime_path) = node_runtime.binary_path().await else {
-            return Err(DevContainerError::NodeRuntimeNotAvailable);
+pub(crate) async fn apply_devcontainer_template(
+    worktree: Entity<Worktree>,
+    template: &DevContainerTemplate,
+    template_options: &HashMap<String, String>,
+    features_selected: &HashSet<DevContainerFeature>,
+    context: &DevContainerContext,
+    cx: &mut AsyncWindowContext,
+) -> Result<DevContainerApply, DevContainerError> {
+    let token = get_oci_token(
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &context.http_client,
+    )
+    .map_err(|e| {
+        log::error!("Failed to get OCI auth token: {e}");
+        DevContainerError::ResourceFetchFailed
+    })
+    .await?;
+    let manifest = get_latest_oci_manifest(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &context.http_client,
+        Some(&template.id),
+    )
+    .map_err(|e| {
+        log::error!("Failed to fetch template from OCI repository: {e}");
+        DevContainerError::ResourceFetchFailed
+    })
+    .await?;
+
+    let layer = &manifest.layers.get(0).ok_or_else(|| {
+        log::error!("Given manifest has no layers to query for blob. Aborting");
+        DevContainerError::ResourceFetchFailed
+    })?;
+
+    let timestamp = std::time::SystemTime::now()
+        .duration_since(std::time::UNIX_EPOCH)
+        .map(|d| d.as_millis())
+        .unwrap_or(0);
+    let extract_dir = std::env::temp_dir()
+        .join(&template.id)
+        .join(format!("extracted-{timestamp}"));
+
+    context.fs.create_dir(&extract_dir).await.map_err(|e| {
+        log::error!("Could not create temporary directory: {e}");
+        DevContainerError::FilesystemError
+    })?;
+
+    download_oci_tarball(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &layer.digest,
+        "application/vnd.oci.image.manifest.v1+json",
+        &extract_dir,
+        &context.http_client,
+        &context.fs,
+        Some(&template.id),
+    )
+    .map_err(|e| {
+        log::error!("Error downloading tarball: {:?}", e);
+        DevContainerError::ResourceFetchFailed
+    })
+    .await?;
+
+    let downloaded_devcontainer_folder = &extract_dir.join(".devcontainer/");
+    let mut project_files = Vec::new();
+    for entry in WalkDir::new(downloaded_devcontainer_folder) {
+        let Ok(entry) = entry else {
+            continue;
         };
-
-        let datadir_cli_path = paths::devcontainer_dir()
-            .join("node_modules")
-            .join("@devcontainers")
-            .join("cli")
-            .join(&dev_container_script());
-
-        log::debug!(
-            "devcontainer not found in path, using local location: ${}",
-            datadir_cli_path.display()
-        );
-
-        let mut command =
-            util::command::new_command(node_runtime_path.as_os_str().display().to_string());
-        command.arg(datadir_cli_path.display().to_string());
-        command.arg("--version");
-
-        match command.output().await {
-            Err(e) => log::error!(
-                "Unable to find devcontainer CLI in Data dir. Will try to install. Error: {:?}",
-                e
-            ),
-            Ok(output) => {
-                if output.status.success() {
-                    log::info!("Found devcontainer CLI in Data dir");
-                    return Ok(DevContainerCli {
-                        path: datadir_cli_path.clone(),
-                        node_runtime_path: Some(node_runtime_path.clone()),
-                    });
-                } else {
-                    log::error!(
-                        "Could not run devcontainer CLI from data_dir. Will try once more to install. Output: {:?}",
-                        output
-                    );
-                }
-            }
+        if !entry.file_type().is_file() {
+            continue;
         }
-
-        if let Err(e) = fs::create_dir_all(paths::devcontainer_dir()).await {
-            log::error!("Unable to create devcontainer directory. Error: {:?}", e);
-            return Err(DevContainerError::DevContainerCliNotAvailable);
+        let relative_path = entry.path().strip_prefix(&extract_dir).map_err(|e| {
+            log::error!("Can't create relative path: {e}");
+            DevContainerError::FilesystemError
+        })?;
+        let rel_path = RelPath::unix(relative_path)
+            .map_err(|e| {
+                log::error!("Can't create relative path: {e}");
+                DevContainerError::FilesystemError
+            })?
+            .into_arc();
+        let content = context.fs.load(entry.path()).await.map_err(|e| {
+            log::error!("Unable to read file: {e}");
+            DevContainerError::FilesystemError
+        })?;
+
+        let mut content = expand_template_options(content, template_options);
+        if let Some("devcontainer.json") = &rel_path.file_name() {
+            content = insert_features_into_devcontainer_json(&content, features_selected)
         }
-
-        if let Err(e) = node_runtime
-            .npm_install_packages(
-                &paths::devcontainer_dir(),
-                &[("@devcontainers/cli", "latest")],
-            )
-            .await
-        {
-            log::error!(
-                "Unable to install devcontainer CLI to data directory. Error: {:?}",
-                e
-            );
-            return Err(DevContainerError::DevContainerCliNotAvailable);
-        };
-
-        let mut command =
-            util::command::new_command(node_runtime_path.as_os_str().display().to_string());
-        command.arg(datadir_cli_path.display().to_string());
-        command.arg("--version");
-        if let Err(e) = command.output().await {
-            log::error!(
-                "Unable to find devcontainer cli after NPM install. Error: {:?}",
-                e
-            );
-            Err(DevContainerError::DevContainerCliNotAvailable)
-        } else {
-            Ok(DevContainerCli {
-                path: datadir_cli_path,
-                node_runtime_path: Some(node_runtime_path),
+        worktree
+            .update(cx, |worktree, cx| {
+                worktree.create_entry(rel_path.clone(), false, Some(content.into_bytes()), cx)
             })
-        }
-    } else {
-        log::info!("Found devcontainer cli on $PATH, using it");
-        Ok(DevContainerCli {
-            path: PathBuf::from(&dev_container_cli()),
-            node_runtime_path: None,
-        })
-    }
-}
-
-async fn devcontainer_up(
-    context: &DevContainerContext,
-    cli: &DevContainerCli,
-    config_path: Option<&Path>,
-) -> Result<DevContainerUp, DevContainerError> {
-    let mut command = cli.command(context.use_podman);
-    command.arg("up");
-    command.arg("--workspace-folder");
-    command.arg(context.project_directory.display().to_string());
-
-    if let Some(config) = config_path {
-        command.arg("--config");
-        command.arg(config.display().to_string());
+            .await
+            .map_err(|e| {
+                log::error!("Unable to create entry in worktree: {e}");
+                DevContainerError::NotInValidProject
+            })?;
+        project_files.push(rel_path);
     }
 
-    log::info!("Running full devcontainer up command: {:?}", command);
-
-    match command.output().await {
-        Ok(output) => {
-            if output.status.success() {
-                let raw = String::from_utf8_lossy(&output.stdout);
-                parse_json_from_cli(&raw)
-            } else {
-                let message = format!(
-                    "Non-success status running devcontainer up for workspace: out: {}, err: {}",
-                    String::from_utf8_lossy(&output.stdout),
-                    String::from_utf8_lossy(&output.stderr)
-                );
-
-                log::error!("{}", &message);
-                Err(DevContainerError::DevContainerUpFailed(message))
-            }
-        }
-        Err(e) => {
-            let message = format!("Error running devcontainer up: {:?}", e);
-            log::error!("{}", &message);
-            Err(DevContainerError::DevContainerUpFailed(message))
-        }
-    }
+    Ok(DevContainerApply { project_files })
 }
 
-pub(crate) async fn read_devcontainer_configuration(
-    context: &DevContainerContext,
-    cli: &DevContainerCli,
-    config_path: Option<&Path>,
-) -> Result<DevContainerConfigurationOutput, DevContainerError> {
-    let mut command = cli.command(context.use_podman);
-    command.arg("read-configuration");
-    command.arg("--workspace-folder");
-    command.arg(context.project_directory.display().to_string());
-
-    if let Some(config) = config_path {
-        command.arg("--config");
-        command.arg(config.display().to_string());
-    }
-
-    match command.output().await {
-        Ok(output) => {
-            if output.status.success() {
-                let raw = String::from_utf8_lossy(&output.stdout);
-                parse_json_from_cli(&raw)
-            } else {
-                let message = format!(
-                    "Non-success status running devcontainer read-configuration for workspace: out: {:?}, err: {:?}",
-                    String::from_utf8_lossy(&output.stdout),
-                    String::from_utf8_lossy(&output.stderr)
-                );
-                log::error!("{}", &message);
-                Err(DevContainerError::DevContainerNotFound)
-            }
-        }
-        Err(e) => {
-            let message = format!("Error running devcontainer read-configuration: {:?}", e);
-            log::error!("{}", &message);
-            Err(DevContainerError::DevContainerNotFound)
-        }
+fn insert_features_into_devcontainer_json(
+    content: &str,
+    features: &HashSet<DevContainerFeature>,
+) -> String {
+    if features.is_empty() {
+        return content.to_string();
     }
-}
-
-pub(crate) async fn apply_dev_container_template(
-    template: &DevContainerTemplate,
-    template_options: &HashMap<String, String>,
-    features_selected: &HashSet<DevContainerFeature>,
-    context: &DevContainerContext,
-    cli: &DevContainerCli,
-) -> Result<DevContainerApply, DevContainerError> {
-    let mut command = cli.command(context.use_podman);
-
-    let Ok(serialized_options) = serde_json::to_string(template_options) else {
-        log::error!("Unable to serialize options for {:?}", template_options);
-        return Err(DevContainerError::DevContainerParseFailed);
-    };
 
-    command.arg("templates");
-    command.arg("apply");
-    command.arg("--workspace-folder");
-    command.arg(context.project_directory.display().to_string());
-    command.arg("--template-id");
-    command.arg(format!(
-        "{}/{}",
-        template
-            .source_repository
-            .as_ref()
-            .unwrap_or(&String::from("")),
-        template.id
-    ));
-    command.arg("--template-args");
-    command.arg(serialized_options);
-    command.arg("--features");
-    command.arg(template_features_to_json(features_selected));
-
-    log::debug!("Running full devcontainer apply command: {:?}", command);
+    let features_value: serde_json::Value = features
+        .iter()
+        .map(|f| {
+            let key = format!(
+                "{}/{}:{}",
+                f.source_repository.as_deref().unwrap_or(""),
+                f.id,
+                f.major_version()
+            );
+            (key, serde_json::Value::Object(Default::default()))
+        })
+        .collect::<serde_json::Map<String, serde_json::Value>>()
+        .into();
+
+    let tab_size = infer_json_indent_size(content);
+    let (range, replacement) = replace_value_in_json_text(
+        content,
+        &["features"],
+        tab_size,
+        Some(&features_value),
+        None,
+    );
 
-    match command.output().await {
-        Ok(output) => {
-            if output.status.success() {
-                let raw = String::from_utf8_lossy(&output.stdout);
-                parse_json_from_cli(&raw)
-            } else {
-                let message = format!(
-                    "Non-success status running devcontainer templates apply for workspace: out: {:?}, err: {:?}",
-                    String::from_utf8_lossy(&output.stdout),
-                    String::from_utf8_lossy(&output.stderr)
-                );
+    let mut result = content.to_string();
+    result.replace_range(range, &replacement);
+    result
+}
 
-                log::error!("{}", &message);
-                Err(DevContainerError::DevContainerTemplateApplyFailed(message))
-            }
-        }
-        Err(e) => {
-            let message = format!("Error running devcontainer templates apply: {:?}", e);
-            log::error!("{}", &message);
-            Err(DevContainerError::DevContainerTemplateApplyFailed(message))
-        }
+fn expand_template_options(content: String, template_options: &HashMap<String, String>) -> String {
+    let mut replaced_content = content;
+    for (key, val) in template_options {
+        replaced_content = replaced_content.replace(&format!("${{templateOption:{key}}}"), val)
     }
-}
-// Try to parse directly first (newer versions output pure JSON)
-// If that fails, look for JSON start (older versions have plaintext prefix)
-fn parse_json_from_cli<T: serde::de::DeserializeOwned>(raw: &str) -> Result<T, DevContainerError> {
-    serde_json::from_str::<T>(&raw)
-        .or_else(|e| {
-            log::error!("Error parsing json: {} - will try to find json object in larger plaintext", e);
-            let json_start = raw
-                .find(|c| c == '{')
-                .ok_or_else(|| {
-                    log::error!("No JSON found in devcontainer up output");
-                    DevContainerError::DevContainerParseFailed
-                })?;
-
-            serde_json::from_str(&raw[json_start..]).map_err(|e| {
-                log::error!(
-                    "Unable to parse JSON from devcontainer up output (starting at position {}), error: {:?}",
-                    json_start,
-                    e
-                );
-                DevContainerError::DevContainerParseFailed
-            })
-        })
+    replaced_content
 }
 
 fn get_backup_project_name(remote_workspace_folder: &str, container_id: &str) -> String {
@@ -577,36 +468,11 @@ fn get_backup_project_name(remote_workspace_folder: &str, container_id: &str) ->
         .unwrap_or_else(|| container_id.to_string())
 }
 
-fn template_features_to_json(features_selected: &HashSet<DevContainerFeature>) -> String {
-    let features_map = features_selected
-        .iter()
-        .map(|feature| {
-            let mut map = HashMap::new();
-            map.insert(
-                "id",
-                format!(
-                    "{}/{}:{}",
-                    feature
-                        .source_repository
-                        .as_ref()
-                        .unwrap_or(&String::from("")),
-                    feature.id,
-                    feature.major_version()
-                ),
-            );
-            map
-        })
-        .collect::<Vec<HashMap<&str, String>>>();
-    serde_json::to_string(&features_map).unwrap()
-}
-
 #[cfg(test)]
 mod tests {
     use std::path::PathBuf;
 
-    use crate::devcontainer_api::{
-        DevContainerConfig, DevContainerUp, find_configs_in_snapshot, parse_json_from_cli,
-    };
+    use crate::devcontainer_api::{DevContainerConfig, find_configs_in_snapshot};
     use fs::FakeFs;
     use gpui::TestAppContext;
     use project::Project;
@@ -621,30 +487,6 @@ mod tests {
         });
     }
 
-    #[test]
-    fn should_parse_from_devcontainer_json() {
-        let json = r#"{"outcome":"success","containerId":"826abcac45afd412abff083ab30793daff2f3c8ce2c831df728baf39933cb37a","remoteUser":"vscode","remoteWorkspaceFolder":"/workspaces/zed"}"#;
-        let up: DevContainerUp = parse_json_from_cli(json).unwrap();
-        assert_eq!(up._outcome, "success");
-        assert_eq!(
-            up.container_id,
-            "826abcac45afd412abff083ab30793daff2f3c8ce2c831df728baf39933cb37a"
-        );
-        assert_eq!(up.remote_user, "vscode");
-        assert_eq!(up.remote_workspace_folder, "/workspaces/zed");
-
-        let json_in_plaintext = r#"[2026-01-22T16:19:08.802Z] @devcontainers/cli 0.80.1. Node.js v22.21.1. darwin 24.6.0 arm64.
-            {"outcome":"success","containerId":"826abcac45afd412abff083ab30793daff2f3c8ce2c831df728baf39933cb37a","remoteUser":"vscode","remoteWorkspaceFolder":"/workspaces/zed"}"#;
-        let up: DevContainerUp = parse_json_from_cli(json_in_plaintext).unwrap();
-        assert_eq!(up._outcome, "success");
-        assert_eq!(
-            up.container_id,
-            "826abcac45afd412abff083ab30793daff2f3c8ce2c831df728baf39933cb37a"
-        );
-        assert_eq!(up.remote_user, "vscode");
-        assert_eq!(up.remote_workspace_folder, "/workspaces/zed");
-    }
-
     #[gpui::test]
     async fn test_find_configs_root_devcontainer_json(cx: &mut TestAppContext) {
         init_test(cx);

crates/dev_container/src/devcontainer_json.rs 🔗

@@ -0,0 +1,1358 @@
+use std::{collections::HashMap, fmt::Display, path::Path, sync::Arc};
+
+use crate::{command_json::CommandRunner, devcontainer_api::DevContainerError};
+use serde::{Deserialize, Deserializer, Serialize};
+use serde_json_lenient::Value;
+use util::command::Command;
+
/// A port to forward from the container, as listed in `forwardPorts`.
/// The spec allows either a plain number or a "host:port"-style string.
/// NOTE: variant order matters — `#[serde(untagged)]` tries `Number` first.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq, Clone)]
#[serde(untagged)]
pub(crate) enum ForwardPort {
    Number(u16),
    String(String),
}
+
/// Protocol hint for a forwarded port (`portsAttributes.*.protocol`).
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) enum PortAttributeProtocol {
    Https,
    Http,
}
+
/// `portsAttributes.*.onAutoForward`: what the client should do when a port
/// is automatically forwarded.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) enum OnAutoForward {
    Notify,
    OpenBrowser,
    OpenBrowserOnce,
    OpenPreview,
    Silent,
    Ignore,
}
+
/// Per-port attributes from `portsAttributes` / `otherPortsAttributes`.
/// All fields are non-`Option`, so an entry missing any of them fails to parse.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct PortAttributes {
    label: String,
    on_auto_forward: OnAutoForward,
    elevate_if_needed: bool,
    require_local_port: bool,
    protocol: PortAttributeProtocol,
}
+
/// `userEnvProbe`: which kind of shell to probe for the remote user's
/// environment variables.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) enum UserEnvProbe {
    None,
    InteractiveShell,
    LoginShell,
    LoginInteractiveShell,
}
+
/// `shutdownAction`: what to do with the container (or compose stack) when
/// the client disconnects.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) enum ShutdownAction {
    None,
    StopContainer,
    StopCompose,
}
+
/// A container mount (`mounts` / `workspaceMount`) in structured form.
/// Docker-CLI-style mount strings are converted into this shape by the
/// custom deserializers in this module.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MountDefinition {
    pub(crate) source: String,
    pub(crate) target: String,
    // The JSON key is "type", which is a Rust keyword, hence the rename.
    // When absent, the `Display` impl infers "bind" vs "volume" from the source.
    #[serde(rename = "type")]
    pub(crate) mount_type: Option<String>,
}
+
+impl Display for MountDefinition {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "type={},source={},target={},consistency=cached",
+            self.mount_type.clone().unwrap_or_else(|| {
+                if self.source.starts_with('/') {
+                    "bind".to_string()
+                } else {
+                    "volume".to_string()
+                }
+            }),
+            self.source,
+            self.target
+        )
+    }
+}
+
/// Represents the value associated with a feature ID in the `features` map of devcontainer.json.
///
/// Per the spec, the value can be:
/// - A boolean (`true` to enable with defaults)
/// - A string (shorthand for `{"version": "<value>"}`)
/// - An object mapping option names to string or boolean values
///
/// See: https://containers.dev/implementors/features/#devcontainerjson-properties
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq, Clone)]
#[serde(untagged)]
pub(crate) enum FeatureOptions {
    /// `true`/`false`: enable the feature with default options.
    Bool(bool),
    /// Shorthand for `{"version": "<value>"}`.
    String(String),
    /// Full option map: option name -> string or boolean value.
    Options(HashMap<String, FeatureOptionValue>),
}
+
/// A single feature option value; the spec allows either a string or a boolean.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq, Clone)]
#[serde(untagged)]
pub(crate) enum FeatureOptionValue {
    Bool(bool),
    String(String),
}
+impl std::fmt::Display for FeatureOptionValue {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            FeatureOptionValue::Bool(b) => write!(f, "{}", b),
+            FeatureOptionValue::String(s) => write!(f, "{}", s),
+        }
+    }
+}
+
/// Wrapper for the `customizations` object. Only the `"zed"` section is kept;
/// entries for other tools (vscode, codespaces, …) are discarded by the
/// custom `Deserialize` impl in this module.
#[derive(Clone, Debug, Serialize, Eq, PartialEq, Default)]
pub(crate) struct ZedCustomizationsWrapper {
    pub(crate) zed: ZedCustomization,
}
+
/// The `customizations.zed` block: currently just the list of Zed extension
/// ids to install in the container.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Default)]
pub(crate) struct ZedCustomization {
    // A missing `extensions` key deserializes to an empty list.
    #[serde(default)]
    pub(crate) extensions: Vec<String>,
}
+
/// The `build` block: builds the container image from a Dockerfile.
#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ContainerBuild {
    // Path to the Dockerfile — presumably relative to the devcontainer.json
    // location; confirm against the consumer in docker.rs.
    pub(crate) dockerfile: String,
    context: Option<String>,
    pub(crate) args: Option<HashMap<String, String>>,
    options: Option<Vec<String>>,
    target: Option<String>,
    // The spec allows a single string or an array; both normalize to a Vec.
    #[serde(default, deserialize_with = "deserialize_string_or_array")]
    cache_from: Option<Vec<String>>,
}
+
/// A single parsed lifecycle command: the program to run plus its arguments.
/// `command` is `None` when the source argv was empty.
#[derive(Clone, Debug, Serialize, Eq, PartialEq)]
struct LifecycleScriptInternal {
    command: Option<String>,
    args: Vec<String>,
}
+
+impl LifecycleScriptInternal {
+    fn from_args(args: Vec<String>) -> Self {
+        let command = args.get(0).map(|a| a.to_string());
+        let remaining = args.iter().skip(1).map(|a| a.to_string()).collect();
+        Self {
+            command,
+            args: remaining,
+        }
+    }
+}
+
/// A set of named lifecycle commands. String and array forms from JSON are
/// stored under the single key `"default"`; map forms keep their own keys.
#[derive(Clone, Debug, Serialize, Eq, PartialEq)]
pub struct LifecycleScript {
    scripts: HashMap<String, LifecycleScriptInternal>,
}
+
/// `hostRequirements`: minimum host resources. Note: the spec's `gpu` key is
/// not currently parsed.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct HostRequirements {
    cpus: Option<u16>,
    memory: Option<String>,
    storage: Option<String>,
}
+
/// Lifecycle stage names accepted by the `waitFor` property.
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) enum LifecycleCommand {
    InitializeCommand,
    OnCreateCommand,
    UpdateContentCommand,
    PostCreateCommand,
    PostStartCommand,
}
+
/// How the container image for a configuration is produced; derived from
/// which configuration keys are present (see `DevContainer::build_type`).
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum DevContainerBuildType {
    Image,
    Dockerfile,
    DockerCompose,
    None,
}
/// In-memory representation of a `devcontainer.json` document. Field names
/// map to the spec's camelCase keys via `rename_all`.
/// See: https://containers.dev/implementors/json_reference/
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub(crate) struct DevContainer {
    pub(crate) image: Option<String>,
    pub(crate) name: Option<String>,
    pub(crate) remote_user: Option<String>,
    pub(crate) forward_ports: Option<Vec<ForwardPort>>,
    pub(crate) ports_attributes: Option<HashMap<String, PortAttributes>>,
    pub(crate) other_ports_attributes: Option<PortAttributes>,
    pub(crate) container_env: Option<HashMap<String, String>>,
    pub(crate) remote_env: Option<HashMap<String, String>>,
    pub(crate) container_user: Option<String>,
    // Spec key ends in "UID" (not camelCase), so an explicit rename is needed.
    #[serde(rename = "updateRemoteUserUID")]
    pub(crate) update_remote_user_uid: Option<bool>,
    user_env_probe: Option<UserEnvProbe>,
    override_command: Option<bool>,
    shutdown_action: Option<ShutdownAction>,
    init: Option<bool>,
    pub(crate) privileged: Option<bool>,
    cap_add: Option<Vec<String>>,
    security_opt: Option<Vec<String>>,
    // Mounts may be structured objects or Docker-CLI-style strings.
    #[serde(default, deserialize_with = "deserialize_mount_definitions")]
    pub(crate) mounts: Option<Vec<MountDefinition>>,
    pub(crate) features: Option<HashMap<String, FeatureOptions>>,
    pub(crate) override_feature_install_order: Option<Vec<String>>,
    // Only the "zed" entry of `customizations` is retained (custom Deserialize).
    pub(crate) customizations: Option<ZedCustomizationsWrapper>,
    pub(crate) build: Option<ContainerBuild>,
    // The spec allows a number or a string; both are normalized to String.
    #[serde(default, deserialize_with = "deserialize_string_or_int")]
    pub(crate) app_port: Option<String>,
    #[serde(default, deserialize_with = "deserialize_mount_definition")]
    pub(crate) workspace_mount: Option<MountDefinition>,
    pub(crate) workspace_folder: Option<String>,
    run_args: Option<Vec<String>>,
    // A single path or a list of paths; normalized to a Vec.
    #[serde(default, deserialize_with = "deserialize_string_or_array")]
    pub(crate) docker_compose_file: Option<Vec<String>>,
    pub(crate) service: Option<String>,
    run_services: Option<Vec<String>>,
    // Lifecycle hooks; each accepts a string, an array, or a named map of commands.
    pub(crate) initialize_command: Option<LifecycleScript>,
    pub(crate) on_create_command: Option<LifecycleScript>,
    pub(crate) update_content_command: Option<LifecycleScript>,
    pub(crate) post_create_command: Option<LifecycleScript>,
    pub(crate) post_start_command: Option<LifecycleScript>,
    pub(crate) post_attach_command: Option<LifecycleScript>,
    wait_for: Option<LifecycleCommand>,
    host_requirements: Option<HostRequirements>,
}
+
+pub(crate) fn deserialize_devcontainer_json(json: &str) -> Result<DevContainer, DevContainerError> {
+    match serde_json_lenient::from_str(json) {
+        Ok(devcontainer) => Ok(devcontainer),
+        Err(e) => {
+            log::error!("Unable to deserialize devcontainer from json: {e}");
+            Err(DevContainerError::DevContainerParseFailed)
+        }
+    }
+}
+
+impl DevContainer {
+    pub(crate) fn build_type(&self) -> DevContainerBuildType {
+        if self.image.is_some() {
+            return DevContainerBuildType::Image;
+        } else if self.docker_compose_file.is_some() {
+            return DevContainerBuildType::DockerCompose;
+        } else if self.build.is_some() {
+            return DevContainerBuildType::Dockerfile;
+        }
+        return DevContainerBuildType::None;
+    }
+
+    pub(crate) fn has_features(&self) -> bool {
+        self.features
+            .as_ref()
+            .map(|features| !features.is_empty())
+            .unwrap_or(false)
+    }
+}
+
+// Custom deserializer that parses the entire customizations object as a
+// serde_json_lenient::Value first, then extracts the "zed" portion.
+// This avoids a bug in serde_json_lenient's `ignore_value` codepath which
+// does not handle trailing commas in skipped values.
+impl<'de> Deserialize<'de> for ZedCustomizationsWrapper {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let value = Value::deserialize(deserializer)?;
+        let zed = value
+            .get("zed")
+            .map(|zed_value| serde_json_lenient::from_value::<ZedCustomization>(zed_value.clone()))
+            .transpose()
+            .map_err(serde::de::Error::custom)?
+            .unwrap_or_default();
+        Ok(ZedCustomizationsWrapper { zed })
+    }
+}
+
+impl LifecycleScript {
+    fn from_map(args: HashMap<String, Vec<String>>) -> Self {
+        Self {
+            scripts: args
+                .into_iter()
+                .map(|(k, v)| (k, LifecycleScriptInternal::from_args(v)))
+                .collect(),
+        }
+    }
+    fn from_str(args: &str) -> Self {
+        let script: Vec<String> = args.split(" ").map(|a| a.to_string()).collect();
+
+        Self::from_args(script)
+    }
+    fn from_args(args: Vec<String>) -> Self {
+        Self::from_map(HashMap::from([("default".to_string(), args)]))
+    }
+    pub fn script_commands(&self) -> HashMap<String, Command> {
+        self.scripts
+            .iter()
+            .filter_map(|(k, v)| {
+                if let Some(inner_command) = &v.command {
+                    let mut command = Command::new(inner_command);
+                    command.args(&v.args);
+                    Some((k.clone(), command))
+                } else {
+                    log::warn!(
+                        "Lifecycle script command {k}, value {:?} has no program to run. Skipping",
+                        v
+                    );
+                    None
+                }
+            })
+            .collect()
+    }
+
+    pub async fn run(
+        &self,
+        command_runnder: &Arc<dyn CommandRunner>,
+        working_directory: &Path,
+    ) -> Result<(), DevContainerError> {
+        for (command_name, mut command) in self.script_commands() {
+            log::debug!("Running script {command_name}");
+
+            command.current_dir(working_directory);
+
+            let output = command_runnder
+                .run_command(&mut command)
+                .await
+                .map_err(|e| {
+                    log::error!("Error running command {command_name}: {e}");
+                    DevContainerError::CommandFailed(command_name.clone())
+                })?;
+            if !output.status.success() {
+                let std_err = String::from_utf8_lossy(&output.stderr);
+                log::error!(
+                    "Command {command_name} produced a non-successful output. StdErr: {std_err}"
+                );
+            }
+            let std_out = String::from_utf8_lossy(&output.stdout);
+            log::debug!("Command {command_name} output:\n {std_out}");
+        }
+        Ok(())
+    }
+}
+
// Lifecycle commands in devcontainer.json accept three shapes:
//   "cmd arg"                  (string)
//   ["cmd", "arg"]             (array)
//   {"name": "cmd arg", ...}   (map of named commands)
// This visitor normalizes all three into a `LifecycleScript`.
impl<'de> Deserialize<'de> for LifecycleScript {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        use serde::de::{self, Visitor};
        use std::fmt;

        struct LifecycleScriptVisitor;

        impl<'de> Visitor<'de> for LifecycleScriptVisitor {
            type Value = LifecycleScript;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a string, an array of strings, or a map of arrays")
            }

            // String form: split on single spaces (no shell quoting), stored
            // under the "default" key.
            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                Ok(LifecycleScript::from_str(value))
            }

            // Array form: elements become argv entries under the "default" key.
            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: de::SeqAccess<'de>,
            {
                let mut array = Vec::new();
                while let Some(elem) = seq.next_element()? {
                    array.push(elem);
                }
                Ok(LifecycleScript::from_args(array))
            }

            // Map form: each value may itself be a string or an array of
            // strings. NOTE: entries with any other value type, and non-string
            // array elements, are silently dropped rather than rejected.
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
            where
                A: de::MapAccess<'de>,
            {
                let mut result = HashMap::new();
                while let Some(key) = map.next_key::<String>()? {
                    let value: Value = map.next_value()?;
                    let script_args = match value {
                        Value::String(s) => {
                            s.split(" ").map(|s| s.to_string()).collect::<Vec<String>>()
                        }
                        Value::Array(arr) => {
                            let strings: Vec<String> = arr
                                .into_iter()
                                .filter_map(|v| v.as_str().map(|s| s.to_string()))
                                .collect();
                            strings
                        }
                        _ => continue,
                    };
                    result.insert(key, script_args);
                }
                Ok(LifecycleScript::from_map(result))
            }
        }

        deserializer.deserialize_any(LifecycleScriptVisitor)
    }
}
+
+fn deserialize_mount_definition<'de, D>(
+    deserializer: D,
+) -> Result<Option<MountDefinition>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    use serde::Deserialize;
+    use serde::de::Error;
+
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum MountItem {
+        Object(MountDefinition),
+        String(String),
+    }
+
+    let item = MountItem::deserialize(deserializer)?;
+
+    let mount = match item {
+        MountItem::Object(mount) => mount,
+        MountItem::String(s) => {
+            let mut source = None;
+            let mut target = None;
+            let mut mount_type = None;
+
+            for part in s.split(',') {
+                let part = part.trim();
+                if let Some((key, value)) = part.split_once('=') {
+                    match key.trim() {
+                        "source" => source = Some(value.trim().to_string()),
+                        "target" => target = Some(value.trim().to_string()),
+                        "type" => mount_type = Some(value.trim().to_string()),
+                        _ => {} // Ignore unknown keys
+                    }
+                }
+            }
+
+            let source = source
+                .ok_or_else(|| D::Error::custom(format!("mount string missing 'source': {}", s)))?;
+            let target = target
+                .ok_or_else(|| D::Error::custom(format!("mount string missing 'target': {}", s)))?;
+
+            MountDefinition {
+                source,
+                target,
+                mount_type,
+            }
+        }
+    };
+
+    Ok(Some(mount))
+}
+
+fn deserialize_mount_definitions<'de, D>(
+    deserializer: D,
+) -> Result<Option<Vec<MountDefinition>>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    use serde::Deserialize;
+    use serde::de::Error;
+
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum MountItem {
+        Object(MountDefinition),
+        String(String),
+    }
+
+    let items = Vec::<MountItem>::deserialize(deserializer)?;
+    let mut mounts = Vec::new();
+
+    for item in items {
+        match item {
+            MountItem::Object(mount) => mounts.push(mount),
+            MountItem::String(s) => {
+                let mut source = None;
+                let mut target = None;
+                let mut mount_type = None;
+
+                for part in s.split(',') {
+                    let part = part.trim();
+                    if let Some((key, value)) = part.split_once('=') {
+                        match key.trim() {
+                            "source" => source = Some(value.trim().to_string()),
+                            "target" => target = Some(value.trim().to_string()),
+                            "type" => mount_type = Some(value.trim().to_string()),
+                            _ => {} // Ignore unknown keys
+                        }
+                    }
+                }
+
+                let source = source.ok_or_else(|| {
+                    D::Error::custom(format!("mount string missing 'source': {}", s))
+                })?;
+                let target = target.ok_or_else(|| {
+                    D::Error::custom(format!("mount string missing 'target': {}", s))
+                })?;
+
+                mounts.push(MountDefinition {
+                    source,
+                    target,
+                    mount_type,
+                });
+            }
+        }
+    }
+
+    Ok(Some(mounts))
+}
+
+fn deserialize_string_or_int<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    use serde::Deserialize;
+
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum StringOrInt {
+        String(String),
+        Int(u32),
+    }
+
+    match StringOrInt::deserialize(deserializer)? {
+        StringOrInt::String(s) => Ok(Some(s)),
+        StringOrInt::Int(b) => Ok(Some(b.to_string())),
+    }
+}
+
+fn deserialize_string_or_array<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
+where
+    D: serde::Deserializer<'de>,
+{
+    use serde::Deserialize;
+
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum StringOrArray {
+        String(String),
+        Array(Vec<String>),
+    }
+
+    match StringOrArray::deserialize(deserializer)? {
+        StringOrArray::String(s) => Ok(Some(vec![s])),
+        StringOrArray::Array(b) => Ok(Some(b)),
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::collections::HashMap;
+
+    use crate::{
+        devcontainer_api::DevContainerError,
+        devcontainer_json::{
+            ContainerBuild, DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort,
+            HostRequirements, LifecycleCommand, LifecycleScript, MountDefinition, OnAutoForward,
+            PortAttributeProtocol, PortAttributes, ShutdownAction, UserEnvProbe, ZedCustomization,
+            ZedCustomizationsWrapper, deserialize_devcontainer_json,
+        },
+    };
+
+    #[test]
+    fn should_deserialize_customizations_with_unknown_keys() {
+        let json_with_other_customizations = r#"
+            {
+                "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
+                "customizations": {
+                  "vscode": {
+                    "extensions": [
+                      "dbaeumer.vscode-eslint",
+                      "GitHub.vscode-pull-request-github",
+                    ],
+                  },
+                  "zed": {
+                    "extensions": ["vue", "ruby"],
+                  },
+                  "codespaces": {
+                    "repositories": {
+                      "devcontainers/features": {
+                        "permissions": {
+                          "contents": "write",
+                          "workflows": "write",
+                        },
+                      },
+                    },
+                  },
+                },
+            }
+        "#;
+
+        let result = deserialize_devcontainer_json(json_with_other_customizations);
+
+        assert!(
+            result.is_ok(),
+            "Should ignore unknown customization keys, but got: {:?}",
+            result.err()
+        );
+        let devcontainer = result.expect("ok");
+        assert_eq!(
+            devcontainer.customizations,
+            Some(ZedCustomizationsWrapper {
+                zed: ZedCustomization {
+                    extensions: vec!["vue".to_string(), "ruby".to_string()]
+                }
+            })
+        );
+    }
+
+    #[test]
+    fn should_deserialize_customizations_without_zed_key() {
+        let json_without_zed = r#"
+            {
+                "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
+                "customizations": {
+                    "vscode": {
+                        "extensions": ["dbaeumer.vscode-eslint"]
+                    }
+                }
+            }
+        "#;
+
+        let result = deserialize_devcontainer_json(json_without_zed);
+
+        assert!(
+            result.is_ok(),
+            "Should handle missing zed key in customizations, but got: {:?}",
+            result.err()
+        );
+        let devcontainer = result.expect("ok");
+        assert_eq!(
+            devcontainer.customizations,
+            Some(ZedCustomizationsWrapper {
+                zed: ZedCustomization { extensions: vec![] }
+            })
+        );
+    }
+
+    #[test]
+    fn should_deserialize_simple_devcontainer_json() {
+        let given_bad_json = "{ \"image\": 123 }";
+
+        let result = deserialize_devcontainer_json(given_bad_json);
+
+        assert!(result.is_err());
+        assert_eq!(
+            result.expect_err("err"),
+            DevContainerError::DevContainerParseFailed
+        );
+
+        let given_image_container_json = r#"
+            // These are some external comments. serde_lenient should handle them
+            {
+                // These are some internal comments
+                "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
+                "name": "myDevContainer",
+                "remoteUser": "root",
+                "forwardPorts": [
+                    "db:5432",
+                    3000
+                ],
+                "portsAttributes": {
+                    "3000": {
+                        "label": "This Port",
+                        "onAutoForward": "notify",
+                        "elevateIfNeeded": false,
+                        "requireLocalPort": true,
+                        "protocol": "https"
+                    },
+                    "db:5432": {
+                        "label": "This Port too",
+                        "onAutoForward": "silent",
+                        "elevateIfNeeded": true,
+                        "requireLocalPort": false,
+                        "protocol": "http"
+                    }
+                },
+                "otherPortsAttributes": {
+                    "label": "Other Ports",
+                    "onAutoForward": "openBrowser",
+                    "elevateIfNeeded": true,
+                    "requireLocalPort": true,
+                    "protocol": "https"
+                },
+                "updateRemoteUserUID": true,
+                "remoteEnv": {
+                    "MYVAR1": "myvarvalue",
+                    "MYVAR2": "myvarothervalue"
+                },
+                "initializeCommand": ["echo", "initialize_command"],
+                "onCreateCommand": "echo on_create_command",
+                "updateContentCommand": {
+                    "first": "echo update_content_command",
+                    "second": ["echo", "update_content_command"]
+                },
+                "postCreateCommand": ["echo", "post_create_command"],
+                "postStartCommand": "echo post_start_command",
+                "postAttachCommand": {
+                    "something": "echo post_attach_command",
+                    "something1": "echo something else",
+                },
+                "waitFor": "postStartCommand",
+                "userEnvProbe": "loginShell",
+                "features": {
+              		"ghcr.io/devcontainers/features/aws-cli:1": {},
+              		"ghcr.io/devcontainers/features/anaconda:1": {}
+               	},
+                "overrideFeatureInstallOrder": [
+                    "ghcr.io/devcontainers/features/anaconda:1",
+                    "ghcr.io/devcontainers/features/aws-cli:1"
+                ],
+                "hostRequirements": {
+                    "cpus": 2,
+                    "memory": "8gb",
+                    "storage": "32gb",
+                    // Note that we're not parsing this currently
+                    "gpu": true,
+                },
+                "appPort": 8081,
+                "containerEnv": {
+                    "MYVAR3": "myvar3",
+                    "MYVAR4": "myvar4"
+                },
+                "containerUser": "myUser",
+                "mounts": [
+                    {
+                        "source": "/localfolder/app",
+                        "target": "/workspaces/app",
+                        "type": "volume"
+                    }
+                ],
+                "runArgs": [
+                    "-c",
+                    "some_command"
+                ],
+                "shutdownAction": "stopContainer",
+                "overrideCommand": true,
+                "workspaceFolder": "/workspaces",
+                "workspaceMount": "source=/app,target=/workspaces/app,type=bind,consistency=cached",
+                "customizations": {
+                    "vscode": {
+                        // Just confirm that this can be included and ignored
+                    },
+                    "zed": {
+                        "extensions": [
+                            "html"
+                        ]
+                    }
+                }
+            }
+            "#;
+
+        let result = deserialize_devcontainer_json(given_image_container_json);
+
+        assert!(result.is_ok());
+        let devcontainer = result.expect("ok");
+        assert_eq!(
+            devcontainer,
+            DevContainer {
+                image: Some(String::from("mcr.microsoft.com/devcontainers/base:ubuntu")),
+                name: Some(String::from("myDevContainer")),
+                remote_user: Some(String::from("root")),
+                forward_ports: Some(vec![
+                    ForwardPort::String("db:5432".to_string()),
+                    ForwardPort::Number(3000),
+                ]),
+                ports_attributes: Some(HashMap::from([
+                    (
+                        "3000".to_string(),
+                        PortAttributes {
+                            label: "This Port".to_string(),
+                            on_auto_forward: OnAutoForward::Notify,
+                            elevate_if_needed: false,
+                            require_local_port: true,
+                            protocol: PortAttributeProtocol::Https
+                        }
+                    ),
+                    (
+                        "db:5432".to_string(),
+                        PortAttributes {
+                            label: "This Port too".to_string(),
+                            on_auto_forward: OnAutoForward::Silent,
+                            elevate_if_needed: true,
+                            require_local_port: false,
+                            protocol: PortAttributeProtocol::Http
+                        }
+                    )
+                ])),
+                other_ports_attributes: Some(PortAttributes {
+                    label: "Other Ports".to_string(),
+                    on_auto_forward: OnAutoForward::OpenBrowser,
+                    elevate_if_needed: true,
+                    require_local_port: true,
+                    protocol: PortAttributeProtocol::Https
+                }),
+                update_remote_user_uid: Some(true),
+                remote_env: Some(HashMap::from([
+                    ("MYVAR1".to_string(), "myvarvalue".to_string()),
+                    ("MYVAR2".to_string(), "myvarothervalue".to_string())
+                ])),
+                initialize_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "initialize_command".to_string()
+                ])),
+                on_create_command: Some(LifecycleScript::from_str("echo on_create_command")),
+                update_content_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "first".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    ),
+                    (
+                        "second".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    )
+                ]))),
+                post_create_command: Some(LifecycleScript::from_str("echo post_create_command")),
+                post_start_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "post_start_command".to_string()
+                ])),
+                post_attach_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "something".to_string(),
+                        vec!["echo".to_string(), "post_attach_command".to_string()]
+                    ),
+                    (
+                        "something1".to_string(),
+                        vec![
+                            "echo".to_string(),
+                            "something".to_string(),
+                            "else".to_string()
+                        ]
+                    )
+                ]))),
+                wait_for: Some(LifecycleCommand::PostStartCommand),
+                user_env_probe: Some(UserEnvProbe::LoginShell),
+                features: Some(HashMap::from([
+                    (
+                        "ghcr.io/devcontainers/features/aws-cli:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    ),
+                    (
+                        "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    )
+                ])),
+                override_feature_install_order: Some(vec![
+                    "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                    "ghcr.io/devcontainers/features/aws-cli:1".to_string()
+                ]),
+                host_requirements: Some(HostRequirements {
+                    cpus: Some(2),
+                    memory: Some("8gb".to_string()),
+                    storage: Some("32gb".to_string()),
+                }),
+                app_port: Some("8081".to_string()),
+                container_env: Some(HashMap::from([
+                    ("MYVAR3".to_string(), "myvar3".to_string()),
+                    ("MYVAR4".to_string(), "myvar4".to_string())
+                ])),
+                container_user: Some("myUser".to_string()),
+                mounts: Some(vec![MountDefinition {
+                    source: "/localfolder/app".to_string(),
+                    target: "/workspaces/app".to_string(),
+                    mount_type: Some("volume".to_string()),
+                }]),
+                run_args: Some(vec!["-c".to_string(), "some_command".to_string()]),
+                shutdown_action: Some(ShutdownAction::StopContainer),
+                override_command: Some(true),
+                workspace_folder: Some("/workspaces".to_string()),
+                workspace_mount: Some(MountDefinition {
+                    source: "/app".to_string(),
+                    target: "/workspaces/app".to_string(),
+                    mount_type: Some("bind".to_string())
+                }),
+                customizations: Some(ZedCustomizationsWrapper {
+                    zed: ZedCustomization {
+                        extensions: vec!["html".to_string()]
+                    }
+                }),
+                ..Default::default()
+            }
+        );
+
+        assert_eq!(devcontainer.build_type(), DevContainerBuildType::Image);
+    }
+
+    #[test]
+    fn should_deserialize_docker_compose_devcontainer_json() {
+        // JSONC fixture: exercises comments, trailing commas, and the
+        // docker-compose-specific fields (dockerComposeFile / service /
+        // runServices) alongside the shared lifecycle and port settings.
+        let given_docker_compose_json = r#"
+            // These are some external comments. serde_lenient should handle them
+            {
+                // These are some internal comments
+                "name": "myDevContainer",
+                "remoteUser": "root",
+                "forwardPorts": [
+                    "db:5432",
+                    3000
+                ],
+                "portsAttributes": {
+                    "3000": {
+                        "label": "This Port",
+                        "onAutoForward": "notify",
+                        "elevateIfNeeded": false,
+                        "requireLocalPort": true,
+                        "protocol": "https"
+                    },
+                    "db:5432": {
+                        "label": "This Port too",
+                        "onAutoForward": "silent",
+                        "elevateIfNeeded": true,
+                        "requireLocalPort": false,
+                        "protocol": "http"
+                    }
+                },
+                "otherPortsAttributes": {
+                    "label": "Other Ports",
+                    "onAutoForward": "openBrowser",
+                    "elevateIfNeeded": true,
+                    "requireLocalPort": true,
+                    "protocol": "https"
+                },
+                "updateRemoteUserUID": true,
+                "remoteEnv": {
+                    "MYVAR1": "myvarvalue",
+                    "MYVAR2": "myvarothervalue"
+                },
+                "initializeCommand": ["echo", "initialize_command"],
+                "onCreateCommand": "echo on_create_command",
+                "updateContentCommand": {
+                    "first": "echo update_content_command",
+                    "second": ["echo", "update_content_command"]
+                },
+                "postCreateCommand": ["echo", "post_create_command"],
+                "postStartCommand": "echo post_start_command",
+                "postAttachCommand": {
+                    "something": "echo post_attach_command",
+                    "something1": "echo something else",
+                },
+                "waitFor": "postStartCommand",
+                "userEnvProbe": "loginShell",
+                "features": {
+              		"ghcr.io/devcontainers/features/aws-cli:1": {},
+              		"ghcr.io/devcontainers/features/anaconda:1": {}
+               	},
+                "overrideFeatureInstallOrder": [
+                    "ghcr.io/devcontainers/features/anaconda:1",
+                    "ghcr.io/devcontainers/features/aws-cli:1"
+                ],
+                "hostRequirements": {
+                    "cpus": 2,
+                    "memory": "8gb",
+                    "storage": "32gb",
+                    // Note that we're not parsing this currently
+                    "gpu": true,
+                },
+                "dockerComposeFile": "docker-compose.yml",
+                "service": "myService",
+                "runServices": [
+                    "myService",
+                    "mySupportingService"
+                ],
+                "workspaceFolder": "/workspaces/thing",
+                "shutdownAction": "stopCompose",
+                "overrideCommand": true
+            }
+            "#;
+        let result = deserialize_devcontainer_json(given_docker_compose_json);
+
+        assert!(result.is_ok());
+        let devcontainer = result.expect("ok");
+        assert_eq!(
+            devcontainer,
+            DevContainer {
+                name: Some(String::from("myDevContainer")),
+                remote_user: Some(String::from("root")),
+                forward_ports: Some(vec![
+                    ForwardPort::String("db:5432".to_string()),
+                    ForwardPort::Number(3000),
+                ]),
+                ports_attributes: Some(HashMap::from([
+                    (
+                        "3000".to_string(),
+                        PortAttributes {
+                            label: "This Port".to_string(),
+                            on_auto_forward: OnAutoForward::Notify,
+                            elevate_if_needed: false,
+                            require_local_port: true,
+                            protocol: PortAttributeProtocol::Https
+                        }
+                    ),
+                    (
+                        "db:5432".to_string(),
+                        PortAttributes {
+                            label: "This Port too".to_string(),
+                            on_auto_forward: OnAutoForward::Silent,
+                            elevate_if_needed: true,
+                            require_local_port: false,
+                            protocol: PortAttributeProtocol::Http
+                        }
+                    )
+                ])),
+                other_ports_attributes: Some(PortAttributes {
+                    label: "Other Ports".to_string(),
+                    on_auto_forward: OnAutoForward::OpenBrowser,
+                    elevate_if_needed: true,
+                    require_local_port: true,
+                    protocol: PortAttributeProtocol::Https
+                }),
+                update_remote_user_uid: Some(true),
+                remote_env: Some(HashMap::from([
+                    ("MYVAR1".to_string(), "myvarvalue".to_string()),
+                    ("MYVAR2".to_string(), "myvarothervalue".to_string())
+                ])),
+                initialize_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "initialize_command".to_string()
+                ])),
+                on_create_command: Some(LifecycleScript::from_str("echo on_create_command")),
+                update_content_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "first".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    ),
+                    (
+                        "second".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    )
+                ]))),
+                post_create_command: Some(LifecycleScript::from_str("echo post_create_command")),
+                post_start_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "post_start_command".to_string()
+                ])),
+                post_attach_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "something".to_string(),
+                        vec!["echo".to_string(), "post_attach_command".to_string()]
+                    ),
+                    (
+                        "something1".to_string(),
+                        vec![
+                            "echo".to_string(),
+                            "something".to_string(),
+                            "else".to_string()
+                        ]
+                    )
+                ]))),
+                wait_for: Some(LifecycleCommand::PostStartCommand),
+                user_env_probe: Some(UserEnvProbe::LoginShell),
+                features: Some(HashMap::from([
+                    (
+                        "ghcr.io/devcontainers/features/aws-cli:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    ),
+                    (
+                        "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    )
+                ])),
+                override_feature_install_order: Some(vec![
+                    "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                    "ghcr.io/devcontainers/features/aws-cli:1".to_string()
+                ]),
+                host_requirements: Some(HostRequirements {
+                    cpus: Some(2),
+                    memory: Some("8gb".to_string()),
+                    storage: Some("32gb".to_string()),
+                }),
+                // A single-string "dockerComposeFile" deserializes into a
+                // one-element Vec.
+                docker_compose_file: Some(vec!["docker-compose.yml".to_string()]),
+                service: Some("myService".to_string()),
+                run_services: Some(vec![
+                    "myService".to_string(),
+                    "mySupportingService".to_string(),
+                ]),
+                workspace_folder: Some("/workspaces/thing".to_string()),
+                shutdown_action: Some(ShutdownAction::StopCompose),
+                override_command: Some(true),
+                ..Default::default()
+            }
+        );
+
+        // Presence of dockerComposeFile should drive the DockerCompose
+        // build type.
+        assert_eq!(
+            devcontainer.build_type(),
+            DevContainerBuildType::DockerCompose
+        );
+    }
+
+    #[test]
+    fn should_deserialize_dockerfile_devcontainer_json() {
+        // JSONC fixture for the Dockerfile flavor: exercises the "build"
+        // block, both object- and string-form mounts, a numeric appPort,
+        // and a string-form workspaceMount.
+        let given_dockerfile_container_json = r#"
+            // These are some external comments. serde_lenient should handle them
+            {
+                // These are some internal comments
+                "name": "myDevContainer",
+                "remoteUser": "root",
+                "forwardPorts": [
+                    "db:5432",
+                    3000
+                ],
+                "portsAttributes": {
+                    "3000": {
+                        "label": "This Port",
+                        "onAutoForward": "notify",
+                        "elevateIfNeeded": false,
+                        "requireLocalPort": true,
+                        "protocol": "https"
+                    },
+                    "db:5432": {
+                        "label": "This Port too",
+                        "onAutoForward": "silent",
+                        "elevateIfNeeded": true,
+                        "requireLocalPort": false,
+                        "protocol": "http"
+                    }
+                },
+                "otherPortsAttributes": {
+                    "label": "Other Ports",
+                    "onAutoForward": "openBrowser",
+                    "elevateIfNeeded": true,
+                    "requireLocalPort": true,
+                    "protocol": "https"
+                },
+                "updateRemoteUserUID": true,
+                "remoteEnv": {
+                    "MYVAR1": "myvarvalue",
+                    "MYVAR2": "myvarothervalue"
+                },
+                "initializeCommand": ["echo", "initialize_command"],
+                "onCreateCommand": "echo on_create_command",
+                "updateContentCommand": {
+                    "first": "echo update_content_command",
+                    "second": ["echo", "update_content_command"]
+                },
+                "postCreateCommand": ["echo", "post_create_command"],
+                "postStartCommand": "echo post_start_command",
+                "postAttachCommand": {
+                    "something": "echo post_attach_command",
+                    "something1": "echo something else",
+                },
+                "waitFor": "postStartCommand",
+                "userEnvProbe": "loginShell",
+                "features": {
+              		"ghcr.io/devcontainers/features/aws-cli:1": {},
+              		"ghcr.io/devcontainers/features/anaconda:1": {}
+               	},
+                "overrideFeatureInstallOrder": [
+                    "ghcr.io/devcontainers/features/anaconda:1",
+                    "ghcr.io/devcontainers/features/aws-cli:1"
+                ],
+                "hostRequirements": {
+                    "cpus": 2,
+                    "memory": "8gb",
+                    "storage": "32gb",
+                    // Note that we're not parsing this currently
+                    "gpu": true,
+                },
+                "appPort": 8081,
+                "containerEnv": {
+                    "MYVAR3": "myvar3",
+                    "MYVAR4": "myvar4"
+                },
+                "containerUser": "myUser",
+                "mounts": [
+                    {
+                        "source": "/localfolder/app",
+                        "target": "/workspaces/app",
+                        "type": "volume"
+                    },
+                    "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
+                ],
+                "runArgs": [
+                    "-c",
+                    "some_command"
+                ],
+                "shutdownAction": "stopContainer",
+                "overrideCommand": true,
+                "workspaceFolder": "/workspaces",
+                "workspaceMount": "source=/folder,target=/workspace,type=bind,consistency=cached",
+                "build": {
+                   	"dockerfile": "DockerFile",
+                   	"context": "..",
+                   	"args": {
+                   	    "MYARG": "MYVALUE"
+                   	},
+                   	"options": [
+                   	    "--some-option",
+                   	    "--mount"
+                   	],
+                   	"target": "development",
+                   	"cacheFrom": "some_image"
+                }
+            }
+            "#;
+
+        let result = deserialize_devcontainer_json(given_dockerfile_container_json);
+
+        assert!(result.is_ok());
+        let devcontainer = result.expect("ok");
+        assert_eq!(
+            devcontainer,
+            DevContainer {
+                name: Some(String::from("myDevContainer")),
+                remote_user: Some(String::from("root")),
+                forward_ports: Some(vec![
+                    ForwardPort::String("db:5432".to_string()),
+                    ForwardPort::Number(3000),
+                ]),
+                ports_attributes: Some(HashMap::from([
+                    (
+                        "3000".to_string(),
+                        PortAttributes {
+                            label: "This Port".to_string(),
+                            on_auto_forward: OnAutoForward::Notify,
+                            elevate_if_needed: false,
+                            require_local_port: true,
+                            protocol: PortAttributeProtocol::Https
+                        }
+                    ),
+                    (
+                        "db:5432".to_string(),
+                        PortAttributes {
+                            label: "This Port too".to_string(),
+                            on_auto_forward: OnAutoForward::Silent,
+                            elevate_if_needed: true,
+                            require_local_port: false,
+                            protocol: PortAttributeProtocol::Http
+                        }
+                    )
+                ])),
+                other_ports_attributes: Some(PortAttributes {
+                    label: "Other Ports".to_string(),
+                    on_auto_forward: OnAutoForward::OpenBrowser,
+                    elevate_if_needed: true,
+                    require_local_port: true,
+                    protocol: PortAttributeProtocol::Https
+                }),
+                update_remote_user_uid: Some(true),
+                remote_env: Some(HashMap::from([
+                    ("MYVAR1".to_string(), "myvarvalue".to_string()),
+                    ("MYVAR2".to_string(), "myvarothervalue".to_string())
+                ])),
+                initialize_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "initialize_command".to_string()
+                ])),
+                on_create_command: Some(LifecycleScript::from_str("echo on_create_command")),
+                update_content_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "first".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    ),
+                    (
+                        "second".to_string(),
+                        vec!["echo".to_string(), "update_content_command".to_string()]
+                    )
+                ]))),
+                post_create_command: Some(LifecycleScript::from_str("echo post_create_command")),
+                post_start_command: Some(LifecycleScript::from_args(vec![
+                    "echo".to_string(),
+                    "post_start_command".to_string()
+                ])),
+                post_attach_command: Some(LifecycleScript::from_map(HashMap::from([
+                    (
+                        "something".to_string(),
+                        vec!["echo".to_string(), "post_attach_command".to_string()]
+                    ),
+                    (
+                        "something1".to_string(),
+                        vec![
+                            "echo".to_string(),
+                            "something".to_string(),
+                            "else".to_string()
+                        ]
+                    )
+                ]))),
+                wait_for: Some(LifecycleCommand::PostStartCommand),
+                user_env_probe: Some(UserEnvProbe::LoginShell),
+                features: Some(HashMap::from([
+                    (
+                        "ghcr.io/devcontainers/features/aws-cli:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    ),
+                    (
+                        "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                        FeatureOptions::Options(HashMap::new())
+                    )
+                ])),
+                override_feature_install_order: Some(vec![
+                    "ghcr.io/devcontainers/features/anaconda:1".to_string(),
+                    "ghcr.io/devcontainers/features/aws-cli:1".to_string()
+                ]),
+                host_requirements: Some(HostRequirements {
+                    cpus: Some(2),
+                    memory: Some("8gb".to_string()),
+                    storage: Some("32gb".to_string()),
+                }),
+                // Numeric appPort 8081 deserializes to its string form.
+                app_port: Some("8081".to_string()),
+                container_env: Some(HashMap::from([
+                    ("MYVAR3".to_string(), "myvar3".to_string()),
+                    ("MYVAR4".to_string(), "myvar4".to_string())
+                ])),
+                container_user: Some("myUser".to_string()),
+                // The shorthand "source=...,target=..." mount string parses
+                // into a MountDefinition with no mount type.
+                mounts: Some(vec![
+                    MountDefinition {
+                        source: "/localfolder/app".to_string(),
+                        target: "/workspaces/app".to_string(),
+                        mount_type: Some("volume".to_string()),
+                    },
+                    MountDefinition {
+                        source: "dev-containers-cli-bashhistory".to_string(),
+                        target: "/home/node/commandhistory".to_string(),
+                        mount_type: None,
+                    }
+                ]),
+                run_args: Some(vec!["-c".to_string(), "some_command".to_string()]),
+                shutdown_action: Some(ShutdownAction::StopContainer),
+                override_command: Some(true),
+                workspace_folder: Some("/workspaces".to_string()),
+                workspace_mount: Some(MountDefinition {
+                    source: "/folder".to_string(),
+                    target: "/workspace".to_string(),
+                    mount_type: Some("bind".to_string())
+                }),
+                build: Some(ContainerBuild {
+                    dockerfile: "DockerFile".to_string(),
+                    context: Some("..".to_string()),
+                    args: Some(HashMap::from([(
+                        "MYARG".to_string(),
+                        "MYVALUE".to_string()
+                    )])),
+                    options: Some(vec!["--some-option".to_string(), "--mount".to_string()]),
+                    target: Some("development".to_string()),
+                    cache_from: Some(vec!["some_image".to_string()]),
+                }),
+                ..Default::default()
+            }
+        );
+
+        assert_eq!(devcontainer.build_type(), DevContainerBuildType::Dockerfile);
+    }
+}

crates/dev_container/src/devcontainer_manifest.rs 🔗

@@ -0,0 +1,6571 @@
+use std::{
+    collections::HashMap,
+    fmt::Debug,
+    hash::{DefaultHasher, Hash, Hasher},
+    path::{Path, PathBuf},
+    sync::Arc,
+};
+
+use fs::Fs;
+use http_client::HttpClient;
+use util::{ResultExt, command::Command};
+
+use crate::{
+    DevContainerConfig, DevContainerContext,
+    command_json::{CommandRunner, DefaultCommandRunner},
+    devcontainer_api::{DevContainerError, DevContainerUp},
+    devcontainer_json::{
+        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
+        deserialize_devcontainer_json,
+    },
+    docker::{
+        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
+        DockerComposeVolume, DockerInspect, DockerPs, get_remote_dir_from_config,
+    },
+    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
+    get_oci_token,
+    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
+    safe_id_lower,
+};
+
+/// Tracks how far the raw devcontainer.json has been processed.
+enum ConfigStatus {
+    // Parsed straight from disk; ${...} variables are still literal text.
+    Deserialized(DevContainer),
+    // Re-parsed after non-remote variable substitution
+    // (see parse_nonremote_vars).
+    VariableParsed(DevContainer),
+}
+
+/// The docker-compose files referenced by a dev container config together
+/// with their parsed configuration.
+#[derive(Debug, Clone, Eq, PartialEq, Default)]
+pub(crate) struct DockerComposeResources {
+    // Compose file paths named by the configuration.
+    files: Vec<PathBuf>,
+    // Parsed compose configuration for the files above.
+    config: DockerComposeConfig,
+}
+
+/// Everything needed to materialize one dev container: the parsed config,
+/// host-side context, and the clients used to drive docker, run commands,
+/// and fetch feature artifacts.
+struct DevContainerManifest {
+    http_client: Arc<dyn HttpClient>,
+    fs: Arc<dyn Fs>,
+    docker_client: Arc<dyn DockerClient>,
+    command_runner: Arc<dyn CommandRunner>,
+    // Unmodified devcontainer.json text, kept so variables can be
+    // re-substituted against the original source.
+    raw_config: String,
+    // Current parse state (raw vs. variable-substituted); see ConfigStatus.
+    config: ConfigStatus,
+    // Host environment, consumed by ${localEnv:...} substitution.
+    local_environment: HashMap<String, String>,
+    // Project root on the host machine.
+    local_project_directory: PathBuf,
+    // Directory containing the devcontainer config file.
+    config_directory: PathBuf,
+    // File name of the config within config_directory
+    // (e.g. "devcontainer.json").
+    file_name: String,
+    // None/empty at construction; populated by later build stages
+    // (not shown in this section).
+    root_image: Option<DockerInspect>,
+    features_build_info: Option<FeaturesBuildInfo>,
+    features: Vec<FeatureManifest>,
+}
+// Default parent directory for the project inside the container.
+const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
+impl DevContainerManifest {
+    /// Loads and deserializes the devcontainer config at
+    /// `local_project_path` joined with `local_config.config_path`, and
+    /// captures the raw text plus the config's directory/file name for
+    /// later variable substitution.
+    ///
+    /// Errors with `DevContainerParseFailed` when the file cannot be read,
+    /// parsed, or has an invalid name, and `NotInValidProject` when the
+    /// config path has no parent directory.
+    async fn new(
+        context: &DevContainerContext,
+        environment: HashMap<String, String>,
+        docker_client: Arc<dyn DockerClient>,
+        command_runner: Arc<dyn CommandRunner>,
+        local_config: DevContainerConfig,
+        local_project_path: &Path,
+    ) -> Result<Self, DevContainerError> {
+        let config_path = local_project_path.join(local_config.config_path.clone());
+        log::debug!("parsing devcontainer json found in {:?}", &config_path);
+        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
+            log::error!("Unable to read devcontainer contents: {e}");
+            DevContainerError::DevContainerParseFailed
+        })?;
+
+        // Validate the JSON up front, even though the raw text is what we
+        // store for later re-parsing.
+        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
+
+        let devcontainer_directory = config_path.parent().ok_or_else(|| {
+            log::error!("Dev container file should be in a directory");
+            DevContainerError::NotInValidProject
+        })?;
+        // file_name must be valid unicode so it can round-trip as a String.
+        let file_name = config_path
+            .file_name()
+            .and_then(|f| f.to_str())
+            .ok_or_else(|| {
+                log::error!("Dev container file has no file name, or is invalid unicode");
+                DevContainerError::DevContainerParseFailed
+            })?;
+
+        Ok(Self {
+            fs: context.fs.clone(),
+            http_client: context.http_client.clone(),
+            docker_client,
+            command_runner,
+            raw_config: devcontainer_contents,
+            config: ConfigStatus::Deserialized(devcontainer),
+            local_project_directory: local_project_path.to_path_buf(),
+            local_environment: environment,
+            config_directory: devcontainer_directory.to_path_buf(),
+            file_name: file_name.to_string(),
+            root_image: None,
+            features_build_info: None,
+            features: Vec::new(),
+        })
+    }
+
+    /// Derives a 16-hex-digit id from the identifying labels, sorted by key
+    /// so the result does not depend on label order.
+    ///
+    /// NOTE(review): DefaultHasher's algorithm is explicitly not guaranteed
+    /// to be stable across Rust releases, so this id can silently change
+    /// after a toolchain upgrade. If the id needs to be durable (e.g. for
+    /// ${devcontainerId} substitution or locating existing containers),
+    /// consider a fixed algorithm such as FNV-1a or SHA-256 — TODO confirm
+    /// the durability requirement.
+    fn devcontainer_id(&self) -> String {
+        let mut labels = self.identifying_labels();
+        labels.sort_by_key(|(key, _)| *key);
+
+        let mut hasher = DefaultHasher::new();
+        for (key, value) in &labels {
+            key.hash(&mut hasher);
+            value.hash(&mut hasher);
+        }
+
+        format!("{:016x}", hasher.finish())
+    }
+
+    /// Labels that identify this dev container on the host: the local
+    /// project folder and the config file it was built from.
+    fn identifying_labels(&self) -> Vec<(&str, String)> {
+        // Return the vec directly instead of binding it to a local first
+        // (clippy::let_and_return); the redundant parens around the
+        // display() calls are dropped as well.
+        vec![
+            (
+                "devcontainer.local_folder",
+                self.local_project_directory.display().to_string(),
+            ),
+            (
+                "devcontainer.config_file",
+                self.config_file().display().to_string(),
+            ),
+        ]
+    }
+
+    /// Substitutes the variables that can be resolved without a running
+    /// container: ${devcontainerId}, ${containerWorkspaceFolder[Basename]},
+    /// ${localWorkspaceFolder[Basename]}, and ${localEnv:VAR}.
+    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
+        let mut replaced_content = content
+            .replace("${devcontainerId}", &self.devcontainer_id())
+            .replace(
+                "${containerWorkspaceFolderBasename}",
+                &self.remote_workspace_base_name().unwrap_or_default(),
+            )
+            .replace(
+                "${localWorkspaceFolderBasename}",
+                &self.local_workspace_base_name()?,
+            )
+            .replace(
+                "${containerWorkspaceFolder}",
+                &self
+                    .remote_workspace_folder()
+                    .map(|path| path.display().to_string())
+                    .unwrap_or_default()
+                    // Normalize path separators to forward slashes.
+                    .replace('\\', "/"),
+            )
+            .replace(
+                "${localWorkspaceFolder}",
+                &self.local_workspace_folder().replace('\\', "/"),
+            );
+        for (k, v) in &self.local_environment {
+            let find = format!("${{localEnv:{k}}}");
+            // NOTE(review): this rewrites backslashes in *every* env value,
+            // which would mangle values that legitimately contain '\'
+            // (non-path data on Windows) — confirm this is intended.
+            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
+        }
+
+        Ok(replaced_content)
+    }
+
+    /// Re-parses the original config text after substituting all
+    /// non-remote variables, advancing the state to VariableParsed.
+    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
+        let substituted = self.parse_nonremote_vars_for_content(&self.raw_config)?;
+        self.config = ConfigStatus::VariableParsed(deserialize_devcontainer_json(&substituted)?);
+        Ok(())
+    }
+
+    /// Merges the config's remoteEnv over the container's own environment,
+    /// resolving ${containerEnv:VAR} references against `container_env`.
+    /// Entries from remoteEnv win on key collisions.
+    fn runtime_remote_env(
+        &self,
+        container_env: &HashMap<String, String>,
+    ) -> Result<HashMap<String, String>, DevContainerError> {
+        let mut merged_remote_env = container_env.clone();
+        // HOME is user-specific, and we will often not run as the image user
+        merged_remote_env.remove("HOME");
+        if let Some(remote_env) = self.dev_container().remote_env.clone() {
+            // Substitution is done on the serialized JSON text, then the
+            // text is parsed back into a map.
+            // NOTE(review): if a container env value contains JSON-special
+            // characters ('"' or '\'), this textual splice can corrupt the
+            // document and fail the re-parse; substituting directly on the
+            // map values would be safer — confirm whether such values occur.
+            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
+                log::error!(
+                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
+                    remote_env
+                );
+                DevContainerError::DevContainerParseFailed
+            })?;
+            for (k, v) in container_env {
+                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
+            }
+            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
+                .map_err(|e| {
+                    log::error!(
+                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
+                        &raw
+                    );
+                    DevContainerError::DevContainerParseFailed
+                })?;
+            for (k, v) in reserialized {
+                merged_remote_env.insert(k, v);
+            }
+        }
+        Ok(merged_remote_env)
+    }
+
+    /// Path of the devcontainer config file (config directory + file name).
+    fn config_file(&self) -> PathBuf {
+        self.config_directory.join(&self.file_name)
+    }
+
+    /// Returns the parsed config regardless of how far variable
+    /// substitution has progressed.
+    fn dev_container(&self) -> &DevContainer {
+        match &self.config {
+            ConfigStatus::Deserialized(dc) | ConfigStatus::VariableParsed(dc) => dc,
+        }
+    }
+
+    /// Resolves the on-disk Dockerfile path for this config, if any: from
+    /// `build.dockerfile`, or from the primary compose service's build
+    /// section. Image-based and build-less configs yield None.
+    async fn dockerfile_location(&self) -> Option<PathBuf> {
+        let dev_container = self.dev_container();
+        match dev_container.build_type() {
+            DevContainerBuildType::Image => None,
+            DevContainerBuildType::Dockerfile => dev_container
+                .build
+                .as_ref()
+                .map(|build| self.config_directory.join(&build.dockerfile)),
+            DevContainerBuildType::DockerCompose => {
+                // Compose lookup failures degrade to "no dockerfile" rather
+                // than surfacing an error.
+                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
+                    return None;
+                };
+                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
+                else {
+                    return None;
+                };
+                main_service
+                    .build
+                    .and_then(|b| b.dockerfile)
+                    .map(|dockerfile| self.config_directory.join(dockerfile))
+            }
+            DevContainerBuildType::None => None,
+        }
+    }
+
+    /// Builds the docker image tag for the features build layer as
+    /// `<prefix>-<hash>-features`, where the prefix is at most the first 6
+    /// bytes of the sanitized container name (or "zed-dc" when unnamed) and
+    /// the hash covers the dockerfile build path.
+    ///
+    /// NOTE(review): DefaultHasher output can change across Rust releases,
+    /// so the tag may differ after a toolchain upgrade (forcing a rebuild)
+    /// — confirm that is acceptable here.
+    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
+        let mut hasher = DefaultHasher::new();
+        let prefix = match &self.dev_container().name {
+            Some(name) => &safe_id_lower(name),
+            None => "zed-dc",
+        };
+        // get(..6) returns None when byte 6 is not a char boundary, in which
+        // case the full prefix is kept.
+        let prefix = prefix.get(..6).unwrap_or(prefix);
+
+        dockerfile_build_path.hash(&mut hasher);
+
+        let hash = hasher.finish();
+        format!("{}-{:x}-features", prefix, hash)
+    }
+
+    /// Gets the base image from the devcontainer with the following precedence:
+    /// - The devcontainer image if an image is specified
+    /// - The image sourced in the Dockerfile if a Dockerfile is specified
+    /// - The image sourced in the docker-compose main service, if one is specified
+    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
+    /// If no such image is available, return an error
+    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
+        if let Some(image) = &self.dev_container().image {
+            return Ok(image.to_string());
+        }
+        if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
+            let dockerfile_contents = self
+                .fs
+                .load(&self.config_directory.join(dockerfile))
+                .await
+                .map_err(|e| {
+                    log::error!("Error reading dockerfile: {e}");
+                    DevContainerError::DevContainerParseFailed
+                })?;
+            return image_from_dockerfile(self, dockerfile_contents);
+        }
+        if self.dev_container().docker_compose_file.is_some() {
+            let docker_compose_manifest = self.docker_compose_manifest().await?;
+            let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
+
+            if let Some(dockerfile) = main_service
+                .build
+                .as_ref()
+                .and_then(|b| b.dockerfile.as_ref())
+            {
+                let dockerfile_contents = self
+                    .fs
+                    .load(&self.config_directory.join(dockerfile))
+                    .await
+                    .map_err(|e| {
+                        log::error!("Error reading dockerfile: {e}");
+                        DevContainerError::DevContainerParseFailed
+                    })?;
+                return image_from_dockerfile(self, dockerfile_contents);
+            }
+            if let Some(image) = &main_service.image {
+                return Ok(image.to_string());
+            }
+
+            log::error!("No valid base image found in docker-compose configuration");
+            return Err(DevContainerError::DevContainerParseFailed);
+        }
+        log::error!("No valid base image found in dev container configuration");
+        Err(DevContainerError::DevContainerParseFailed)
+    }
+
    /// Downloads everything the container build needs beyond the base image:
    /// OCI feature tarballs, per-feature env/install wrapper scripts, and the
    /// generated `Dockerfile.extended` that applies the feature layers.
    ///
    /// On success this populates `self.features`, `self.features_build_info`,
    /// and `self.root_image` for the later build steps to consume. Requires the
    /// config to already be in the `VariableParsed` state.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // A plain image with no features needs no extra build resources at all.
        if dev_container.build_type() == DevContainerBuildType::Image
            && !dev_container.has_features()
        {
            log::debug!("No resources to download. Proceeding with just the image");
            return Ok(());
        }

        // --- Phase 1: Stage scratch directories for the features build ---
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps successive runs from colliding; a clock
        // before the epoch degrades to 0 rather than failing.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // NOTE(review): the `.clone()` is unnecessary — `display()` only borrows.
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // Empty-map default relies on match-arm temporary lifetime extension.
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // The builtin env file records the resolved users; the extended
        // Dockerfile later copies it into the image for install scripts.
        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // --- Phase 2: Fetch each feature's OCI layer and stage its install files ---
        // Ordering honors `overrideFeatureInstallOrder` via resolve_feature_order.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature set to `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // The directory name includes the loop index so the same feature id
            // appearing twice does not clobber an earlier download.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported here.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Registry pull sequence: token, then manifest, then the first
            // layer's tarball.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every valid feature layer must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain expandable variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Stage the feature env file, then the wrapper script that runs the
            // feature's install with that env applied.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        // Non-compose builds always take the BuildKit path; compose builds only
        // when the installed docker compose supports BuildKit.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Base Dockerfile content is optional; load failures are logged and
        // treated as "no base content".
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
+
    /// Renders the contents of `Dockerfile.extended`: the user's base Dockerfile
    /// (if any) followed by stages that copy the staged feature content into the
    /// image and append each feature's install layer.
    ///
    /// `use_buildkit` selects where the feature content comes from: a BuildKit
    /// additional build context, or (without BuildKit) a previously-built
    /// intermediate image named `dev_container_feature_content_temp`.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // UID remapping defaults to on, but is never performed on Windows.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile snippet per downloaded feature, concatenated in order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that look up each user's passwd entry (field 6 = home).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the base Dockerfile's final stage has an alias we can FROM;
        // inject one if the user did not provide their own.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        // With BuildKit the env file is read from the named build context;
        // without it, from the intermediate image's filesystem.
        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            // Per-feature containerEnv contributions, one layer each.
            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // Config-level containerEnv entries become plain ENV instructions.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
+
+    fn build_merged_resources(
+        &self,
+        base_image: DockerInspect,
+    ) -> Result<DockerBuildResources, DevContainerError> {
+        let dev_container = match &self.config {
+            ConfigStatus::Deserialized(_) => {
+                log::error!(
+                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
+                );
+                return Err(DevContainerError::DevContainerParseFailed);
+            }
+            ConfigStatus::VariableParsed(dev_container) => dev_container,
+        };
+        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
+
+        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
+
+        mounts.append(&mut feature_mounts);
+
+        let privileged = dev_container.privileged.unwrap_or(false)
+            || self.features.iter().any(|f| f.privileged());
+
+        let mut entrypoint_script_lines = vec![
+            "echo Container started".to_string(),
+            "trap \"exit 0\" 15".to_string(),
+        ];
+
+        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
+            entrypoint_script_lines.push(entrypoint.clone());
+        }
+        entrypoint_script_lines.append(&mut vec![
+            "exec \"$@\"".to_string(),
+            "while sleep 1 & wait $!; do :; done".to_string(),
+        ]);
+
+        Ok(DockerBuildResources {
+            image: base_image,
+            additional_mounts: mounts,
+            privileged,
+            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
+        })
+    }
+
+    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
+        if let ConfigStatus::Deserialized(_) = &self.config {
+            log::error!(
+                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
+            );
+            return Err(DevContainerError::DevContainerParseFailed);
+        }
+        let dev_container = self.dev_container();
+        match dev_container.build_type() {
+            DevContainerBuildType::Image | DevContainerBuildType::Dockerfile => {
+                let built_docker_image = self.build_docker_image().await?;
+                let built_docker_image = self
+                    .update_remote_user_uid(built_docker_image, None)
+                    .await?;
+
+                let resources = self.build_merged_resources(built_docker_image)?;
+                Ok(DevContainerBuildResources::Docker(resources))
+            }
+            DevContainerBuildType::DockerCompose => {
+                log::debug!("Using docker compose. Building extended compose files");
+                let docker_compose_resources = self.build_and_extend_compose_files().await?;
+
+                return Ok(DevContainerBuildResources::DockerCompose(
+                    docker_compose_resources,
+                ));
+            }
+            DevContainerBuildType::None => {
+                return Err(DevContainerError::DevContainerParseFailed);
+            }
+        }
+    }
+
    /// Starts the dev container from prepared build resources and collects the
    /// values needed to connect to it: container id, remote user, remote
    /// workspace folder, extension ids, and the merged remote environment.
    async fn run_dev_container(
        &self,
        build_resources: DevContainerBuildResources,
    ) -> Result<DevContainerUp, DevContainerError> {
        // Guard: variable expansion must have completed before running.
        let ConfigStatus::VariableParsed(_) = &self.config else {
            log::error!(
                "Variables have not been parsed; cannot proceed with running the dev container"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        // Dispatch to the compose or plain-docker run path.
        let running_container = match build_resources {
            DevContainerBuildResources::DockerCompose(resources) => {
                self.run_docker_compose(resources).await?
            }
            DevContainerBuildResources::Docker(resources) => {
                self.run_docker_image(resources).await?
            }
        };

        // Derive connection details from the running container's configuration.
        let remote_user = get_remote_user_from_config(&running_container, self)?;
        let remote_workspace_folder = get_remote_dir_from_config(
            &running_container,
            (&self.local_project_directory.display()).to_string(),
        )?;

        // Combine the container's env with the configured remote env entries.
        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;

        Ok(DevContainerUp {
            container_id: running_container.id,
            remote_user,
            remote_workspace_folder,
            extension_ids: self.extension_ids(),
            remote_env,
        })
    }
+
+    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
+        let dev_container = match &self.config {
+            ConfigStatus::Deserialized(_) => {
+                log::error!(
+                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
+                );
+                return Err(DevContainerError::DevContainerParseFailed);
+            }
+            ConfigStatus::VariableParsed(dev_container) => dev_container,
+        };
+        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        let docker_compose_full_paths = docker_compose_files
+            .iter()
+            .map(|relative| self.config_directory.join(relative))
+            .collect::<Vec<PathBuf>>();
+
+        let Some(config) = self
+            .docker_client
+            .get_docker_compose_config(&docker_compose_full_paths)
+            .await?
+        else {
+            log::error!("Output could not deserialize into DockerComposeConfig");
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        Ok(DockerComposeResources {
+            files: docker_compose_full_paths,
+            config,
+        })
+    }
+
+    async fn build_and_extend_compose_files(
+        &self,
+    ) -> Result<DockerComposeResources, DevContainerError> {
+        let dev_container = match &self.config {
+            ConfigStatus::Deserialized(_) => {
+                log::error!(
+                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
+                );
+                return Err(DevContainerError::DevContainerParseFailed);
+            }
+            ConfigStatus::VariableParsed(dev_container) => dev_container,
+        };
+
+        let Some(features_build_info) = &self.features_build_info else {
+            log::error!(
+                "Cannot build and extend compose files: features build info is not yet constructed"
+            );
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        let mut docker_compose_resources = self.docker_compose_manifest().await?;
+        let supports_buildkit = self.docker_client.supports_compose_buildkit();
+
+        let (main_service_name, main_service) =
+            find_primary_service(&docker_compose_resources, self)?;
+        let built_service_image = if main_service
+            .build
+            .as_ref()
+            .map(|b| b.dockerfile.as_ref())
+            .is_some()
+        {
+            if !supports_buildkit {
+                self.build_feature_content_image().await?;
+            }
+
+            let dockerfile_path = &features_build_info.dockerfile_path;
+
+            let build_args = if !supports_buildkit {
+                HashMap::from([
+                    (
+                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
+                        "dev_container_auto_added_stage_label".to_string(),
+                    ),
+                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
+                ])
+            } else {
+                HashMap::from([
+                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
+                    (
+                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
+                        "dev_container_auto_added_stage_label".to_string(),
+                    ),
+                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
+                ])
+            };
+
+            let additional_contexts = if !supports_buildkit {
+                None
+            } else {
+                Some(HashMap::from([(
+                    "dev_containers_feature_content_source".to_string(),
+                    features_build_info
+                        .features_content_dir
+                        .display()
+                        .to_string(),
+                )]))
+            };
+
+            let build_override = DockerComposeConfig {
+                name: None,
+                services: HashMap::from([(
+                    main_service_name.clone(),
+                    DockerComposeService {
+                        image: Some(features_build_info.image_tag.clone()),
+                        entrypoint: None,
+                        cap_add: None,
+                        security_opt: None,
+                        labels: None,
+                        build: Some(DockerComposeServiceBuild {
+                            context: Some(
+                                features_build_info.empty_context_dir.display().to_string(),
+                            ),
+                            dockerfile: Some(dockerfile_path.display().to_string()),
+                            args: Some(build_args),
+                            additional_contexts,
+                        }),
+                        volumes: Vec::new(),
+                        ..Default::default()
+                    },
+                )]),
+                volumes: HashMap::new(),
+            };
+
+            let temp_base = std::env::temp_dir().join("devcontainer-zed");
+            let config_location = temp_base.join("docker_compose_build.json");
+
+            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
+                log::error!("Error serializing docker compose runtime override: {e}");
+                DevContainerError::DevContainerParseFailed
+            })?;
+
+            self.fs
+                .write(&config_location, config_json.as_bytes())
+                .await
+                .map_err(|e| {
+                    log::error!("Error writing the runtime override file: {e}");
+                    DevContainerError::FilesystemError
+                })?;
+
+            docker_compose_resources.files.push(config_location);
+
+            self.docker_client
+                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
+                .await?;
+            self.docker_client
+                .inspect(&features_build_info.image_tag)
+                .await?
+        } else if let Some(image) = &main_service.image {
+            if dev_container
+                .features
+                .as_ref()
+                .is_none_or(|features| features.is_empty())
+            {
+                self.docker_client.inspect(image).await?
+            } else {
+                if !supports_buildkit {
+                    self.build_feature_content_image().await?;
+                }
+
+                let dockerfile_path = &features_build_info.dockerfile_path;
+
+                let build_args = if !supports_buildkit {
+                    HashMap::from([
+                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
+                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
+                    ])
+                } else {
+                    HashMap::from([
+                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
+                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
+                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
+                    ])
+                };
+
+                let additional_contexts = if !supports_buildkit {
+                    None
+                } else {
+                    Some(HashMap::from([(
+                        "dev_containers_feature_content_source".to_string(),
+                        features_build_info
+                            .features_content_dir
+                            .display()
+                            .to_string(),
+                    )]))
+                };
+
+                let build_override = DockerComposeConfig {
+                    name: None,
+                    services: HashMap::from([(
+                        main_service_name.clone(),
+                        DockerComposeService {
+                            image: Some(features_build_info.image_tag.clone()),
+                            entrypoint: None,
+                            cap_add: None,
+                            security_opt: None,
+                            labels: None,
+                            build: Some(DockerComposeServiceBuild {
+                                context: Some(
+                                    features_build_info.empty_context_dir.display().to_string(),
+                                ),
+                                dockerfile: Some(dockerfile_path.display().to_string()),
+                                args: Some(build_args),
+                                additional_contexts,
+                            }),
+                            volumes: Vec::new(),
+                            ..Default::default()
+                        },
+                    )]),
+                    volumes: HashMap::new(),
+                };
+
+                let temp_base = std::env::temp_dir().join("devcontainer-zed");
+                let config_location = temp_base.join("docker_compose_build.json");
+
+                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
+                    log::error!("Error serializing docker compose runtime override: {e}");
+                    DevContainerError::DevContainerParseFailed
+                })?;
+
+                self.fs
+                    .write(&config_location, config_json.as_bytes())
+                    .await
+                    .map_err(|e| {
+                        log::error!("Error writing the runtime override file: {e}");
+                        DevContainerError::FilesystemError
+                    })?;
+
+                docker_compose_resources.files.push(config_location);
+
+                self.docker_client
+                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
+                    .await?;
+
+                self.docker_client
+                    .inspect(&features_build_info.image_tag)
+                    .await?
+            }
+        } else {
+            log::error!("Docker compose must have either image or dockerfile defined");
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+
+        let built_service_image = self
+            .update_remote_user_uid(built_service_image, Some(&features_build_info.image_tag))
+            .await?;
+
+        let resources = self.build_merged_resources(built_service_image)?;
+
+        let network_mode = main_service.network_mode.as_ref();
+        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
+        let runtime_override_file = self
+            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
+            .await?;
+
+        docker_compose_resources.files.push(runtime_override_file);
+
+        Ok(docker_compose_resources)
+    }
+
    /// Serializes the runtime override compose config (built by
    /// `build_runtime_override`) and writes it to a file under the system
    /// temp directory, returning the path of the written file so it can be
    /// passed to `docker compose -f`.
    ///
    /// # Errors
    /// - `DevContainerParseFailed` if the override config cannot be built or
    ///   serialized.
    /// - `FilesystemError` if the file cannot be written.
    async fn write_runtime_override_file(
        &self,
        main_service_name: &str,
        network_mode_service: Option<&str>,
        resources: DockerBuildResources,
    ) -> Result<PathBuf, DevContainerError> {
        let config =
            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
        // NOTE(review): this path is shared by every project, so two
        // concurrent `up`s would clobber each other's override file —
        // consider namespacing the filename by project. Also presumably the
        // "devcontainer-zed" temp directory is created earlier in the build
        // flow; confirm, since this write does not create it.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let config_location = temp_base.join("docker_compose_runtime.json");

        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
            log::error!("Error serializing docker compose runtime override: {e}");
            DevContainerError::DevContainerParseFailed
        })?;

        self.fs
            .write(&config_location, config_json.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Error writing the runtime override file: {e}");
                DevContainerError::FilesystemError
            })?;

        Ok(config_location)
    }
+
+    fn build_runtime_override(
+        &self,
+        main_service_name: &str,
+        network_mode_service: Option<&str>,
+        resources: DockerBuildResources,
+    ) -> Result<DockerComposeConfig, DevContainerError> {
+        let mut runtime_labels = vec![];
+
+        if let Some(metadata) = &resources.image.config.labels.metadata {
+            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
+                log::error!("Error serializing docker image metadata: {e}");
+                DevContainerError::ContainerNotValid(resources.image.id.clone())
+            })?;
+
+            runtime_labels.push(format!(
+                "{}={}",
+                "devcontainer.metadata", serialized_metadata
+            ));
+        }
+
+        for (k, v) in self.identifying_labels() {
+            runtime_labels.push(format!("{}={}", k, v));
+        }
+
+        let config_volumes: HashMap<String, DockerComposeVolume> = resources
+            .additional_mounts
+            .iter()
+            .filter_map(|mount| {
+                if let Some(mount_type) = &mount.mount_type
+                    && mount_type.to_lowercase() == "volume"
+                {
+                    Some((
+                        mount.source.clone(),
+                        DockerComposeVolume {
+                            name: mount.source.clone(),
+                        },
+                    ))
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        let volumes: Vec<MountDefinition> = resources
+            .additional_mounts
+            .iter()
+            .map(|v| MountDefinition {
+                source: v.source.clone(),
+                target: v.target.clone(),
+                mount_type: v.mount_type.clone(),
+            })
+            .collect();
+
+        let mut main_service = DockerComposeService {
+            entrypoint: Some(vec![
+                "/bin/sh".to_string(),
+                "-c".to_string(),
+                resources.entrypoint_script,
+                "-".to_string(),
+            ]),
+            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
+            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
+            labels: Some(runtime_labels),
+            volumes,
+            privileged: Some(resources.privileged),
+            ..Default::default()
+        };
+        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
+        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
+        if let Some(forward_ports) = &self.dev_container().forward_ports {
+            let main_service_ports: Vec<String> = forward_ports
+                .iter()
+                .filter_map(|f| match f {
+                    ForwardPort::Number(port) => Some(port.to_string()),
+                    ForwardPort::String(port) => {
+                        let parts: Vec<&str> = port.split(":").collect();
+                        if parts.len() <= 1 {
+                            Some(port.to_string())
+                        } else if parts.len() == 2 {
+                            if parts[0] == main_service_name {
+                                Some(parts[1].to_string())
+                            } else {
+                                None
+                            }
+                        } else {
+                            None
+                        }
+                    }
+                })
+                .collect();
+            for port in main_service_ports {
+                // If the main service uses a different service's network bridge, append to that service's ports instead
+                if let Some(network_service_name) = network_mode_service {
+                    if let Some(service) = service_declarations.get_mut(network_service_name) {
+                        service.ports.push(format!("{port}:{port}"));
+                    } else {
+                        service_declarations.insert(
+                            network_service_name.to_string(),
+                            DockerComposeService {
+                                ports: vec![format!("{port}:{port}")],
+                                ..Default::default()
+                            },
+                        );
+                    }
+                } else {
+                    main_service.ports.push(format!("{port}:{port}"));
+                }
+            }
+            let other_service_ports: Vec<(&str, &str)> = forward_ports
+                .iter()
+                .filter_map(|f| match f {
+                    ForwardPort::Number(_) => None,
+                    ForwardPort::String(port) => {
+                        let parts: Vec<&str> = port.split(":").collect();
+                        if parts.len() != 2 {
+                            None
+                        } else {
+                            if parts[0] == main_service_name {
+                                None
+                            } else {
+                                Some((parts[0], parts[1]))
+                            }
+                        }
+                    }
+                })
+                .collect();
+            for (service_name, port) in other_service_ports {
+                if let Some(service) = service_declarations.get_mut(service_name) {
+                    service.ports.push(format!("{port}:{port}"));
+                } else {
+                    service_declarations.insert(
+                        service_name.to_string(),
+                        DockerComposeService {
+                            ports: vec![format!("{port}:{port}")],
+                            ..Default::default()
+                        },
+                    );
+                }
+            }
+        }
+        if let Some(port) = &self.dev_container().app_port {
+            if let Some(network_service_name) = network_mode_service {
+                if let Some(service) = service_declarations.get_mut(network_service_name) {
+                    service.ports.push(format!("{port}:{port}"));
+                } else {
+                    service_declarations.insert(
+                        network_service_name.to_string(),
+                        DockerComposeService {
+                            ports: vec![format!("{port}:{port}")],
+                            ..Default::default()
+                        },
+                    );
+                }
+            } else {
+                main_service.ports.push(format!("{port}:{port}"));
+            }
+        }
+
+        service_declarations.insert(main_service_name.to_string(), main_service);
+        let new_docker_compose_config = DockerComposeConfig {
+            name: None,
+            services: service_declarations,
+            volumes: config_volumes,
+        };
+
+        Ok(new_docker_compose_config)
+    }
+
    /// Builds (or resolves) the container image for image- and
    /// Dockerfile-based dev containers, layering configured features on top
    /// via `docker buildx build`, and returns the inspect data of the result.
    ///
    /// # Errors
    /// `DevContainerParseFailed` when variables have not been expanded yet,
    /// the build type is compose/none, or build prerequisites are missing;
    /// `CommandFailed` when the docker build itself fails.
    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
        // Variable expansion must have happened first; the raw deserialized
        // config may still contain unexpanded placeholders.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        match dev_container.build_type() {
            DevContainerBuildType::Image => {
                let Some(image_tag) = &dev_container.image else {
                    return Err(DevContainerError::DevContainerParseFailed);
                };
                let base_image = self.docker_client.inspect(image_tag).await?;
                // With no features to layer on, the referenced image can be
                // used directly — no build needed.
                if dev_container
                    .features
                    .as_ref()
                    .is_none_or(|features| features.is_empty())
                {
                    log::debug!("No features to add. Using base image");
                    return Ok(base_image);
                }
            }
            DevContainerBuildType::Dockerfile => {}
            // Compose projects are built elsewhere; "none" has nothing to build.
            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
                return Err(DevContainerError::DevContainerParseFailed);
            }
        };

        let mut command = self.create_docker_build()?;

        let output = self
            .command_runner
            .run_command(&mut command)
            .await
            .map_err(|e| {
                log::error!("Error building docker image: {e}");
                DevContainerError::CommandFailed(command.get_program().display().to_string())
            })?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            log::error!("docker buildx build failed: {stderr}");
            return Err(DevContainerError::CommandFailed(
                command.get_program().display().to_string(),
            ));
        }

        // After a successful build, inspect the newly tagged image to get its metadata
        let Some(features_build_info) = &self.features_build_info else {
            log::error!("Features build info expected, but not created");
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let image = self
            .docker_client
            .inspect(&features_build_info.image_tag)
            .await?;

        Ok(image)
    }
+
    /// Windows no-op variant of `update_remote_user_uid`: host UID/GID
    /// remapping is a Unix-host concern, so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _override_tag: Option<&str>,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
+    #[cfg(not(target_os = "windows"))]
+    async fn update_remote_user_uid(
+        &self,
+        image: DockerInspect,
+        override_tag: Option<&str>,
+    ) -> Result<DockerInspect, DevContainerError> {
+        let dev_container = self.dev_container();
+
+        let Some(features_build_info) = &self.features_build_info else {
+            return Ok(image);
+        };
+
+        // updateRemoteUserUID defaults to true per the devcontainers spec
+        if dev_container.update_remote_user_uid == Some(false) {
+            return Ok(image);
+        }
+
+        let remote_user = get_remote_user_from_config(&image, self)?;
+        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
+            return Ok(image);
+        }
+
+        let image_user = image
+            .config
+            .image_user
+            .as_deref()
+            .unwrap_or("root")
+            .to_string();
+
+        let host_uid = Command::new("id")
+            .arg("-u")
+            .output()
+            .await
+            .map_err(|e| {
+                log::error!("Failed to get host UID: {e}");
+                DevContainerError::CommandFailed("id -u".to_string())
+            })
+            .and_then(|output| {
+                String::from_utf8_lossy(&output.stdout)
+                    .trim()
+                    .parse::<u32>()
+                    .map_err(|e| {
+                        log::error!("Failed to parse host UID: {e}");
+                        DevContainerError::CommandFailed("id -u".to_string())
+                    })
+            })?;
+
+        let host_gid = Command::new("id")
+            .arg("-g")
+            .output()
+            .await
+            .map_err(|e| {
+                log::error!("Failed to get host GID: {e}");
+                DevContainerError::CommandFailed("id -g".to_string())
+            })
+            .and_then(|output| {
+                String::from_utf8_lossy(&output.stdout)
+                    .trim()
+                    .parse::<u32>()
+                    .map_err(|e| {
+                        log::error!("Failed to parse host GID: {e}");
+                        DevContainerError::CommandFailed("id -g".to_string())
+                    })
+            })?;
+
+        let dockerfile_content = self.generate_update_uid_dockerfile();
+
+        let dockerfile_path = features_build_info
+            .features_content_dir
+            .join("updateUID.Dockerfile");
+        self.fs
+            .write(&dockerfile_path, dockerfile_content.as_bytes())
+            .await
+            .map_err(|e| {
+                log::error!("Failed to write updateUID Dockerfile: {e}");
+                DevContainerError::FilesystemError
+            })?;
+
+        let updated_image_tag = override_tag
+            .map(|t| t.to_string())
+            .unwrap_or_else(|| format!("{}-uid", features_build_info.image_tag));
+
+        let mut command = Command::new(self.docker_client.docker_cli());
+        command.args(["build"]);
+        command.args(["-f", &dockerfile_path.display().to_string()]);
+        command.args(["-t", &updated_image_tag]);
+        command.args([
+            "--build-arg",
+            &format!("BASE_IMAGE={}", features_build_info.image_tag),
+        ]);
+        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
+        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
+        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
+        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
+        command.arg(features_build_info.empty_context_dir.display().to_string());
+
+        let output = self
+            .command_runner
+            .run_command(&mut command)
+            .await
+            .map_err(|e| {
+                log::error!("Error building UID update image: {e}");
+                DevContainerError::CommandFailed(command.get_program().display().to_string())
+            })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("UID update build failed: {stderr}");
+            return Err(DevContainerError::CommandFailed(
+                command.get_program().display().to_string(),
+            ));
+        }
+
+        self.docker_client.inspect(&updated_image_tag).await
+    }
+
+    #[cfg(not(target_os = "windows"))]
+    fn generate_update_uid_dockerfile(&self) -> String {
+        let mut dockerfile = r#"ARG BASE_IMAGE
+FROM $BASE_IMAGE
+
+USER root
+
+ARG REMOTE_USER
+ARG NEW_UID
+ARG NEW_GID
+SHELL ["/bin/sh", "-c"]
+RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
+	if [ -z "$OLD_UID" ]; then \
+		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
+	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
+		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
+	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
+		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
+	else \
+		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
+			FREE_GID=65532; \
+			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
+			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
+			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
+		fi; \
+		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
+		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
+		if [ "$OLD_GID" != "$NEW_GID" ]; then \
+			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
+		fi; \
+		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
+	fi;
+
+ARG IMAGE_USER
+USER $IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+"#.to_string();
+        for feature in &self.features {
+            let container_env_layer = feature.generate_dockerfile_env();
+            dockerfile = format!("{dockerfile}\n{container_env_layer}");
+        }
+
+        if let Some(env) = &self.dev_container().container_env {
+            for (key, value) in env {
+                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
+            }
+        }
+        dockerfile
+    }
+
+    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
+        let Some(features_build_info) = &self.features_build_info else {
+            log::error!("Features build info not available for building feature content image");
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        let features_content_dir = &features_build_info.features_content_dir;
+
+        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
+        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
+
+        self.fs
+            .write(&dockerfile_path, dockerfile_content.as_bytes())
+            .await
+            .map_err(|e| {
+                log::error!("Failed to write feature content Dockerfile: {e}");
+                DevContainerError::FilesystemError
+            })?;
+
+        let mut command = Command::new(self.docker_client.docker_cli());
+        command.args([
+            "build",
+            "-t",
+            "dev_container_feature_content_temp",
+            "-f",
+            &dockerfile_path.display().to_string(),
+            &features_content_dir.display().to_string(),
+        ]);
+
+        let output = self
+            .command_runner
+            .run_command(&mut command)
+            .await
+            .map_err(|e| {
+                log::error!("Error building feature content image: {e}");
+                DevContainerError::CommandFailed(self.docker_client.docker_cli())
+            })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("Feature content image build failed: {stderr}");
+            return Err(DevContainerError::CommandFailed(
+                self.docker_client.docker_cli(),
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
+        let dev_container = match &self.config {
+            ConfigStatus::Deserialized(_) => {
+                log::error!(
+                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
+                );
+                return Err(DevContainerError::DevContainerParseFailed);
+            }
+            ConfigStatus::VariableParsed(dev_container) => dev_container,
+        };
+
+        let Some(features_build_info) = &self.features_build_info else {
+            log::error!(
+                "Cannot create docker build command; features build info has not been constructed"
+            );
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        let mut command = Command::new(self.docker_client.docker_cli());
+
+        command.args(["buildx", "build"]);
+
+        // --load is short for --output=docker, loading the built image into the local docker images
+        command.arg("--load");
+
+        // BuildKit build context: provides the features content directory as a named context
+        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
+        command.args([
+            "--build-context",
+            &format!(
+                "dev_containers_feature_content_source={}",
+                features_build_info.features_content_dir.display()
+            ),
+        ]);
+
+        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
+        if let Some(build_image) = &features_build_info.build_image {
+            command.args([
+                "--build-arg",
+                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
+            ]);
+        } else {
+            command.args([
+                "--build-arg",
+                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
+            ]);
+        }
+
+        command.args([
+            "--build-arg",
+            &format!(
+                "_DEV_CONTAINERS_IMAGE_USER={}",
+                self.root_image
+                    .as_ref()
+                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
+                    .unwrap_or(&"root".to_string())
+            ),
+        ]);
+
+        command.args([
+            "--build-arg",
+            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
+        ]);
+
+        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
+            for (key, value) in args {
+                command.args(["--build-arg", &format!("{}={}", key, value)]);
+            }
+        }
+
+        command.args(["--target", "dev_containers_target_stage"]);
+
+        command.args([
+            "-f",
+            &features_build_info.dockerfile_path.display().to_string(),
+        ]);
+
+        command.args(["-t", &features_build_info.image_tag]);
+
+        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
+            command.arg(self.config_directory.display().to_string());
+        } else {
+            // Use an empty folder as the build context to avoid pulling in unneeded files.
+            // The actual feature content is supplied via the BuildKit build context above.
+            command.arg(features_build_info.empty_context_dir.display().to_string());
+        }
+
+        Ok(command)
+    }
+
+    async fn run_docker_compose(
+        &self,
+        resources: DockerComposeResources,
+    ) -> Result<DockerInspect, DevContainerError> {
+        let mut command = Command::new(self.docker_client.docker_cli());
+        command.args(&["compose", "--project-name", &self.project_name()]);
+        for docker_compose_file in resources.files {
+            command.args(&["-f", &docker_compose_file.display().to_string()]);
+        }
+        command.args(&["up", "-d"]);
+
+        let output = self
+            .command_runner
+            .run_command(&mut command)
+            .await
+            .map_err(|e| {
+                log::error!("Error running docker compose up: {e}");
+                DevContainerError::CommandFailed(command.get_program().display().to_string())
+            })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("Non-success status from docker compose up: {}", stderr);
+            return Err(DevContainerError::CommandFailed(
+                command.get_program().display().to_string(),
+            ));
+        }
+
+        if let Some(docker_ps) = self.check_for_existing_container().await? {
+            log::debug!("Found newly created dev container");
+            return self.docker_client.inspect(&docker_ps.id).await;
+        }
+
+        log::error!("Could not find existing container after docker compose up");
+
+        Err(DevContainerError::DevContainerParseFailed)
+    }
+
+    async fn run_docker_image(
+        &self,
+        build_resources: DockerBuildResources,
+    ) -> Result<DockerInspect, DevContainerError> {
+        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
+
+        let output = self
+            .command_runner
+            .run_command(&mut docker_run_command)
+            .await
+            .map_err(|e| {
+                log::error!("Error running docker run: {e}");
+                DevContainerError::CommandFailed(
+                    docker_run_command.get_program().display().to_string(),
+                )
+            })?;
+
+        if !output.status.success() {
+            let std_err = String::from_utf8_lossy(&output.stderr);
+            log::error!("Non-success status from docker run. StdErr: {std_err}");
+            return Err(DevContainerError::CommandFailed(
+                docker_run_command.get_program().display().to_string(),
+            ));
+        }
+
+        log::debug!("Checking for container that was started");
+        let Some(docker_ps) = self.check_for_existing_container().await? else {
+            log::error!("Could not locate container just created");
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+        self.docker_client.inspect(&docker_ps.id).await
+    }
+
+    fn local_workspace_folder(&self) -> String {
+        self.local_project_directory.display().to_string()
+    }
+    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
+        self.local_project_directory
+            .file_name()
+            .map(|f| f.display().to_string())
+            .ok_or(DevContainerError::DevContainerParseFailed)
+    }
+
+    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
+        self.dev_container()
+            .workspace_folder
+            .as_ref()
+            .map(|folder| PathBuf::from(folder))
+            .or(Some(
+                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
+            ))
+            .ok_or(DevContainerError::DevContainerParseFailed)
+    }
+    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
+        self.remote_workspace_folder().and_then(|f| {
+            f.file_name()
+                .map(|file_name| file_name.display().to_string())
+                .ok_or(DevContainerError::DevContainerParseFailed)
+        })
+    }
+
+    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
+        if let Some(mount) = &self.dev_container().workspace_mount {
+            return Ok(mount.clone());
+        }
+        let Some(project_directory_name) = self.local_project_directory.file_name() else {
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+
+        Ok(MountDefinition {
+            source: self.local_workspace_folder(),
+            target: format!("/workspaces/{}", project_directory_name.display()),
+            mount_type: None,
+        })
+    }
+
+    fn create_docker_run_command(
+        &self,
+        build_resources: DockerBuildResources,
+    ) -> Result<Command, DevContainerError> {
+        let remote_workspace_mount = self.remote_workspace_mount()?;
+
+        let docker_cli = self.docker_client.docker_cli();
+        let mut command = Command::new(&docker_cli);
+
+        command.arg("run");
+
+        if build_resources.privileged {
+            command.arg("--privileged");
+        }
+
+        if &docker_cli == "podman" {
+            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
+        }
+
+        command.arg("--sig-proxy=false");
+        command.arg("-d");
+        command.arg("--mount");
+        command.arg(remote_workspace_mount.to_string());
+
+        for mount in &build_resources.additional_mounts {
+            command.arg("--mount");
+            command.arg(mount.to_string());
+        }
+
+        for (key, val) in self.identifying_labels() {
+            command.arg("-l");
+            command.arg(format!("{}={}", key, val));
+        }
+
+        if let Some(metadata) = &build_resources.image.config.labels.metadata {
+            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
+                log::error!("Problem serializing image metadata: {e}");
+                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
+            })?;
+            command.arg("-l");
+            command.arg(format!(
+                "{}={}",
+                "devcontainer.metadata", serialized_metadata
+            ));
+        }
+
+        if let Some(forward_ports) = &self.dev_container().forward_ports {
+            for port in forward_ports {
+                if let ForwardPort::Number(port_number) = port {
+                    command.arg("-p");
+                    command.arg(format!("{port_number}:{port_number}"));
+                }
+            }
+        }
+        if let Some(app_port) = &self.dev_container().app_port {
+            command.arg("-p");
+            command.arg(format!("{app_port}:{app_port}"));
+        }
+
+        command.arg("--entrypoint");
+        command.arg("/bin/sh");
+        command.arg(&build_resources.image.id);
+        command.arg("-c");
+
+        command.arg(build_resources.entrypoint_script);
+        command.arg("-");
+
+        Ok(command)
+    }
+
+    fn extension_ids(&self) -> Vec<String> {
+        self.dev_container()
+            .customizations
+            .as_ref()
+            .map(|c| c.zed.extensions.clone())
+            .unwrap_or_default()
+    }
+
    /// End-to-end "up" for a brand-new container: run host-side initialize
    /// commands, download feature/Dockerfile assets, build the image(s),
    /// start the container, then run the in-container lifecycle scripts.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: this is a fresh container, so the create-time lifecycle
        // scripts run in addition to the attach-time ones.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
+
+    async fn run_remote_scripts(
+        &self,
+        devcontainer_up: &DevContainerUp,
+        new_container: bool,
+    ) -> Result<(), DevContainerError> {
+        let ConfigStatus::VariableParsed(config) = &self.config else {
+            log::error!("Config not yet parsed, cannot proceed with remote scripts");
+            return Err(DevContainerError::DevContainerScriptsFailed);
+        };
+        let remote_folder = self.remote_workspace_folder()?.display().to_string();
+
+        if new_container {
+            if let Some(on_create_command) = &config.on_create_command {
+                for (command_name, command) in on_create_command.script_commands() {
+                    log::debug!("Running on create command {command_name}");
+                    self.docker_client
+                        .run_docker_exec(
+                            &devcontainer_up.container_id,
+                            &remote_folder,
+                            "root",
+                            &devcontainer_up.remote_env,
+                            command,
+                        )
+                        .await?;
+                }
+            }
+            if let Some(update_content_command) = &config.update_content_command {
+                for (command_name, command) in update_content_command.script_commands() {
+                    log::debug!("Running update content command {command_name}");
+                    self.docker_client
+                        .run_docker_exec(
+                            &devcontainer_up.container_id,
+                            &remote_folder,
+                            "root",
+                            &devcontainer_up.remote_env,
+                            command,
+                        )
+                        .await?;
+                }
+            }
+
+            if let Some(post_create_command) = &config.post_create_command {
+                for (command_name, command) in post_create_command.script_commands() {
+                    log::debug!("Running post create command {command_name}");
+                    self.docker_client
+                        .run_docker_exec(
+                            &devcontainer_up.container_id,
+                            &remote_folder,
+                            &devcontainer_up.remote_user,
+                            &devcontainer_up.remote_env,
+                            command,
+                        )
+                        .await?;
+                }
+            }
+            if let Some(post_start_command) = &config.post_start_command {
+                for (command_name, command) in post_start_command.script_commands() {
+                    log::debug!("Running post start command {command_name}");
+                    self.docker_client
+                        .run_docker_exec(
+                            &devcontainer_up.container_id,
+                            &remote_folder,
+                            &devcontainer_up.remote_user,
+                            &devcontainer_up.remote_env,
+                            command,
+                        )
+                        .await?;
+                }
+            }
+        }
+        if let Some(post_attach_command) = &config.post_attach_command {
+            for (command_name, command) in post_attach_command.script_commands() {
+                log::debug!("Running post attach command {command_name}");
+                self.docker_client
+                    .run_docker_exec(
+                        &devcontainer_up.container_id,
+                        &remote_folder,
+                        &devcontainer_up.remote_user,
+                        &devcontainer_up.remote_env,
+                        command,
+                    )
+                    .await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
+        let ConfigStatus::VariableParsed(config) = &self.config else {
+            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
+            return Err(DevContainerError::DevContainerParseFailed);
+        };
+
+        if let Some(initialize_command) = &config.initialize_command {
+            log::debug!("Running initialize command");
+            initialize_command
+                .run(&self.command_runner, &self.local_project_directory)
+                .await
+        } else {
+            log::warn!("No initialize command found");
+            Ok(())
+        }
+    }
+
+    async fn check_for_existing_devcontainer(
+        &self,
+    ) -> Result<Option<DevContainerUp>, DevContainerError> {
+        if let Some(docker_ps) = self.check_for_existing_container().await? {
+            log::debug!("Dev container already found. Proceeding with it");
+
+            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
+
+            if !docker_inspect.is_running() {
+                log::debug!("Container not running. Will attempt to start, and then proceed");
+                self.docker_client.start_container(&docker_ps.id).await?;
+            }
+
+            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
+
+            let remote_folder = get_remote_dir_from_config(
+                &docker_inspect,
+                (&self.local_project_directory.display()).to_string(),
+            )?;
+
+            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
+
+            let dev_container_up = DevContainerUp {
+                container_id: docker_ps.id,
+                remote_user: remote_user,
+                remote_workspace_folder: remote_folder,
+                extension_ids: self.extension_ids(),
+                remote_env,
+            };
+
+            self.run_remote_scripts(&dev_container_up, false).await?;
+
+            Ok(Some(dev_container_up))
+        } else {
+            log::debug!("Existing container not found.");
+
+            Ok(None)
+        }
+    }
+
+    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
+        self.docker_client
+            .find_process_by_filters(
+                self.identifying_labels()
+                    .iter()
+                    .map(|(k, v)| format!("label={k}={v}"))
+                    .collect(),
+            )
+            .await
+    }
+
+    fn project_name(&self) -> String {
+        if let Some(name) = &self.dev_container().name {
+            safe_id_lower(name)
+        } else {
+            let alternate_name = &self
+                .local_workspace_base_name()
+                .unwrap_or(self.local_workspace_folder());
+            safe_id_lower(alternate_name)
+        }
+    }
+}
+
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts). All paths are host-side paths.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm")
    // NOTE(review): presumably `None` when the base comes from a Dockerfile
    // stage rather than a named image — confirm against the build path.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
+
+pub(crate) async fn read_devcontainer_configuration(
+    config: DevContainerConfig,
+    context: &DevContainerContext,
+    environment: HashMap<String, String>,
+) -> Result<DevContainer, DevContainerError> {
+    let docker = if context.use_podman {
+        Docker::new("podman")
+    } else {
+        Docker::new("docker")
+    };
+    let mut dev_container = DevContainerManifest::new(
+        context,
+        environment,
+        Arc::new(docker),
+        Arc::new(DefaultCommandRunner::new()),
+        config,
+        &context.project_directory.as_ref(),
+    )
+    .await?;
+    dev_container.parse_nonremote_vars()?;
+    Ok(dev_container.dev_container().clone())
+}
+
+pub(crate) async fn spawn_dev_container(
+    context: &DevContainerContext,
+    environment: HashMap<String, String>,
+    config: DevContainerConfig,
+    local_project_path: &Path,
+) -> Result<DevContainerUp, DevContainerError> {
+    let docker = if context.use_podman {
+        Docker::new("podman")
+    } else {
+        Docker::new("docker")
+    };
+    let mut devcontainer_manifest = DevContainerManifest::new(
+        context,
+        environment,
+        Arc::new(docker),
+        Arc::new(DefaultCommandRunner::new()),
+        config,
+        local_project_path,
+    )
+    .await?;
+
+    devcontainer_manifest.parse_nonremote_vars()?;
+
+    log::debug!("Checking for existing container");
+    if let Some(devcontainer) = devcontainer_manifest
+        .check_for_existing_devcontainer()
+        .await?
+    {
+        Ok(devcontainer)
+    } else {
+        log::debug!("Existing container not found. Building");
+
+        devcontainer_manifest.build_and_run().await
+    }
+}
+
/// Inputs required to `docker run` a single-container (non-compose) dev
/// container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be created from; its
    /// `id` is used as the image argument of `docker run`.
    image: DockerInspect,
    /// Extra mounts beyond the default workspace bind mount.
    additional_mounts: Vec<MountDefinition>,
    // NOTE(review): presumably mapped to `--privileged` by
    // create_docker_run_command — confirm (not visible in this chunk).
    privileged: bool,
    /// Shell script handed to the container's `/bin/sh -c` entrypoint to keep
    /// it alive.
    entrypoint_script: String,
}

/// The two supported container creation strategies: a Docker Compose project
/// or a single `docker run` container.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
+
+fn find_primary_service(
+    docker_compose: &DockerComposeResources,
+    devcontainer: &DevContainerManifest,
+) -> Result<(String, DockerComposeService), DevContainerError> {
+    let Some(service_name) = &devcontainer.dev_container().service else {
+        return Err(DevContainerError::DevContainerParseFailed);
+    };
+
+    match docker_compose.config.services.get(service_name) {
+        Some(service) => Ok((service_name.clone(), service.clone())),
+        None => Err(DevContainerError::DevContainerParseFailed),
+    }
+}
+
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
// NOTE(review): presumably referenced by the generated Dockerfile.extended's
// COPY steps — confirm (generation code is outside this chunk).
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
+
/// Returns `input` with every regex metacharacter prefixed by a backslash,
/// making it safe to embed literally inside a regular expression.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .flat_map(|c| {
            let needs_escape = ".*+?^${}()|[]\\".contains(c);
            needs_escape
                .then_some('\\')
                .into_iter()
                .chain(std::iter::once(c))
        })
        .collect()
}
+
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Drop the version suffix: everything after `@` (digest form), or after a
    // `:` appearing in the final path segment (tag form). A `:` before the
    // last `/` belongs to a registry port, not a version.
    let without_version = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The ID is the last path segment of what remains.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
+
+/// Generates a shell command that looks up a user's passwd entry.
+///
+/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
+/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
+fn get_ent_passwd_shell_command(user: &str) -> String {
+    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
+    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
+    format!(
+        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
+        shell = escaped_for_shell,
+        re = escaped_for_regex,
+    )
+}
+
+/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
+///
+/// Features listed in the override come first (in the specified order), followed
+/// by any remaining features sorted lexicographically by their full reference ID.
+fn resolve_feature_order<'a>(
+    features: &'a HashMap<String, FeatureOptions>,
+    override_order: &Option<Vec<String>>,
+) -> Vec<(&'a String, &'a FeatureOptions)> {
+    if let Some(order) = override_order {
+        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
+        for ordered_id in order {
+            if let Some((key, options)) = features.get_key_value(ordered_id) {
+                ordered.push((key, options));
+            }
+        }
+        let mut remaining: Vec<_> = features
+            .iter()
+            .filter(|(id, _)| !order.iter().any(|o| o == *id))
+            .collect();
+        remaining.sort_by_key(|(id, _)| id.as_str());
+        ordered.extend(remaining);
+        ordered
+    } else {
+        let mut entries: Vec<_> = features.iter().collect();
+        entries.sort_by_key(|(id, _)| id.as_str());
+        entries
+    }
+}
+
+/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
+///
+/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
+/// `containerFeaturesConfiguration.ts`.
+fn generate_install_wrapper(
+    feature_ref: &str,
+    feature_id: &str,
+    env_variables: &str,
+) -> Result<String, DevContainerError> {
+    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
+        log::error!("Error escaping feature ref {feature_ref}: {e}");
+        DevContainerError::DevContainerParseFailed
+    })?;
+    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
+        log::error!("Error escaping feature {feature_id}: {e}");
+        DevContainerError::DevContainerParseFailed
+    })?;
+    let options_indented: String = env_variables
+        .lines()
+        .filter(|l| !l.is_empty())
+        .map(|l| format!("    {}", l))
+        .collect::<Vec<_>>()
+        .join("\n");
+    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
+        log::error!("Error escaping options {options_indented}: {e}");
+        DevContainerError::DevContainerParseFailed
+    })?;
+
+    let script = format!(
+        r#"#!/bin/sh
+set -e
+
+on_exit () {{
+    [ $? -eq 0 ] && exit
+    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
+}}
+
+trap on_exit EXIT
+
+echo ===========================================================================
+echo 'Feature       : {escaped_name}'
+echo 'Id            : {escaped_id}'
+echo 'Options       :'
+echo {escaped_options}
+echo ===========================================================================
+
+set -a
+. ../devcontainer-features.builtin.env
+. ./devcontainer-features.env
+set +a
+
+chmod +x ./install.sh
+./install.sh
+"#
+    );
+
+    Ok(script)
+}
+
// Dockerfile actions need to be moved to their own file

/// Returns the build-stage alias (`FROM <image> AS <alias>`) of the first
/// `FROM` line in the Dockerfile, if it declares one.
///
/// Tokenizes on arbitrary whitespace so repeated spaces or tabs between words
/// don't hide the alias (the previous `split(" ")` produced empty tokens and
/// missed e.g. `FROM  ubuntu  AS base`). The `AS` keyword is matched
/// case-insensitively, as before.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    let from_line = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))?;
    let words: Vec<&str> = from_line.split_whitespace().collect();
    if words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as") {
        Some(words[words.len() - 1].to_string())
    } else {
        None
    }
}

/// Ensures the Dockerfile carries a stage alias: if the first `FROM` already
/// declares one the content is returned unchanged, otherwise ` AS <alias>` is
/// appended to every `FROM` line.
fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
    if dockerfile_alias(dockerfile_content).is_some() {
        return dockerfile_content.to_string();
    }
    dockerfile_content
        .lines()
        .map(|line| {
            if line.starts_with("FROM") {
                format!("{line} AS {alias}")
            } else {
                line.to_string()
            }
        })
        .collect::<Vec<String>>()
        .join("\n")
}
+
+fn image_from_dockerfile(
+    devcontainer: &DevContainerManifest,
+    dockerfile_contents: String,
+) -> Result<String, DevContainerError> {
+    let mut raw_contents = dockerfile_contents
+        .lines()
+        .find(|line| line.starts_with("FROM"))
+        .and_then(|from_line| {
+            from_line
+                .split(' ')
+                .collect::<Vec<&str>>()
+                .get(1)
+                .map(|s| s.to_string())
+        })
+        .ok_or_else(|| {
+            log::error!("Could not find an image definition in dockerfile");
+            DevContainerError::DevContainerParseFailed
+        })?;
+
+    for (k, v) in devcontainer
+        .dev_container()
+        .build
+        .as_ref()
+        .and_then(|b| b.args.as_ref())
+        .unwrap_or(&HashMap::new())
+    {
+        raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
+    }
+    Ok(raw_contents)
+}
+
+// Container user things
+// This should come from spec - see the docs
+fn get_remote_user_from_config(
+    docker_config: &DockerInspect,
+    devcontainer: &DevContainerManifest,
+) -> Result<String, DevContainerError> {
+    if let DevContainer {
+        remote_user: Some(user),
+        ..
+    } = &devcontainer.dev_container()
+    {
+        return Ok(user.clone());
+    }
+    let Some(metadata) = &docker_config.config.labels.metadata else {
+        log::error!("Could not locate metadata");
+        return Err(DevContainerError::ContainerNotValid(
+            docker_config.id.clone(),
+        ));
+    };
+    for metadatum in metadata {
+        if let Some(remote_user) = metadatum.get("remoteUser") {
+            if let Some(remote_user_str) = remote_user.as_str() {
+                return Ok(remote_user_str.to_string());
+            }
+        }
+    }
+    log::error!("Could not locate the remote user");
+    Err(DevContainerError::ContainerNotValid(
+        docker_config.id.clone(),
+    ))
+}
+
+// This should come from spec - see the docs
+fn get_container_user_from_config(
+    docker_config: &DockerInspect,
+    devcontainer: &DevContainerManifest,
+) -> Result<String, DevContainerError> {
+    if let Some(user) = &devcontainer.dev_container().container_user {
+        return Ok(user.to_string());
+    }
+    if let Some(metadata) = &docker_config.config.labels.metadata {
+        for metadatum in metadata {
+            if let Some(container_user) = metadatum.get("containerUser") {
+                if let Some(container_user_str) = container_user.as_str() {
+                    return Ok(container_user_str.to_string());
+                }
+            }
+        }
+    }
+    if let Some(image_user) = &docker_config.config.image_user {
+        return Ok(image_user.to_string());
+    }
+
+    Err(DevContainerError::DevContainerParseFailed)
+}
+
+#[cfg(test)]
+mod test {
+    use std::{
+        collections::HashMap,
+        ffi::OsStr,
+        path::PathBuf,
+        process::{ExitStatus, Output},
+        sync::{Arc, Mutex},
+    };
+
+    use async_trait::async_trait;
+    use fs::{FakeFs, Fs};
+    use gpui::{AppContext, TestAppContext};
+    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
+    use project::{
+        ProjectEnvironment,
+        worktree_store::{WorktreeIdCounter, WorktreeStore},
+    };
+    use serde_json_lenient::Value;
+    use util::{command::Command, paths::SanitizedPath};
+
+    use crate::{
+        DevContainerConfig, DevContainerContext,
+        command_json::CommandRunner,
+        devcontainer_api::DevContainerError,
+        devcontainer_json::MountDefinition,
+        devcontainer_manifest::{
+            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
+            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
+        },
+        docker::{
+            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
+            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
+            DockerPs,
+        },
+        oci::TokenResponse,
+    };
    /// Absolute path used as the fake project root throughout these tests.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
+
+    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
+        let buffer = futures::io::Cursor::new(Vec::new());
+        let mut builder = async_tar::Builder::new(buffer);
+        for (file_name, content) in content {
+            if content.is_empty() {
+                let mut header = async_tar::Header::new_gnu();
+                header.set_size(0);
+                header.set_mode(0o755);
+                header.set_entry_type(async_tar::EntryType::Directory);
+                header.set_cksum();
+                builder
+                    .append_data(&mut header, file_name, &[] as &[u8])
+                    .await
+                    .unwrap();
+            } else {
+                let data = content.as_bytes();
+                let mut header = async_tar::Header::new_gnu();
+                header.set_size(data.len() as u64);
+                header.set_mode(0o755);
+                header.set_entry_type(async_tar::EntryType::Regular);
+                header.set_cksum();
+                builder
+                    .append_data(&mut header, file_name, data)
+                    .await
+                    .unwrap();
+            }
+        }
+        let buffer = builder.into_inner().await.unwrap();
+        buffer.into_inner()
+    }
+
+    fn test_project_filename() -> String {
+        PathBuf::from(TEST_PROJECT_PATH)
+            .file_name()
+            .expect("is valid")
+            .display()
+            .to_string()
+    }
+
+    async fn init_devcontainer_config(
+        fs: &Arc<FakeFs>,
+        devcontainer_contents: &str,
+    ) -> DevContainerConfig {
+        fs.insert_tree(
+            format!("{TEST_PROJECT_PATH}/.devcontainer"),
+            serde_json::json!({"devcontainer.json": devcontainer_contents}),
+        )
+        .await;
+
+        DevContainerConfig::default_config()
+    }
+
    /// Bundle of the fake collaborators handed back to tests so they can
    /// inspect or drive the fs / docker / command-runner doubles after the
    /// manifest has been constructed.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
+
+    async fn init_default_devcontainer_manifest(
+        cx: &mut TestAppContext,
+        devcontainer_contents: &str,
+    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
+        let fs = FakeFs::new(cx.executor());
+        let http_client = fake_http_client();
+        let command_runner = Arc::new(TestCommandRunner::new());
+        let docker = Arc::new(FakeDocker::new());
+        let environment = HashMap::new();
+
+        init_devcontainer_manifest(
+            cx,
+            fs,
+            http_client,
+            docker,
+            command_runner,
+            environment,
+            devcontainer_contents,
+        )
+        .await
+    }
+
+    async fn init_devcontainer_manifest(
+        cx: &mut TestAppContext,
+        fs: Arc<FakeFs>,
+        http_client: Arc<dyn HttpClient>,
+        docker_client: Arc<FakeDocker>,
+        command_runner: Arc<TestCommandRunner>,
+        environment: HashMap<String, String>,
+        devcontainer_contents: &str,
+    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
+        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
+        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
+        let worktree_store =
+            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
+        let project_environment =
+            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));
+
+        let context = DevContainerContext {
+            project_directory: SanitizedPath::cast_arc(project_path),
+            use_podman: false,
+            fs: fs.clone(),
+            http_client: http_client.clone(),
+            environment: project_environment.downgrade(),
+        };
+
+        let test_dependencies = TestDependencies {
+            fs: fs.clone(),
+            _http_client: http_client.clone(),
+            docker: docker_client.clone(),
+            command_runner: command_runner.clone(),
+        };
+        let manifest = DevContainerManifest::new(
+            &context,
+            environment,
+            docker_client,
+            command_runner,
+            local_config,
+            &PathBuf::from(TEST_PROJECT_PATH),
+        )
+        .await?;
+
+        Ok((test_dependencies, manifest))
+    }
+
+    #[gpui::test]
+    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
+        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
+            cx,
+            r#"
+// These are some external comments. serde_lenient should handle them
+{
+    // These are some internal comments
+    "image": "image",
+    "remoteUser": "root",
+}
+            "#,
+        )
+        .await
+        .unwrap();
+
+        let mut metadata = HashMap::new();
+        metadata.insert(
+            "remoteUser".to_string(),
+            serde_json_lenient::Value::String("vsCode".to_string()),
+        );
+        let given_docker_config = DockerInspect {
+            id: "docker_id".to_string(),
+            config: DockerInspectConfig {
+                labels: DockerConfigLabels {
+                    metadata: Some(vec![metadata]),
+                },
+                image_user: None,
+                env: Vec::new(),
+            },
+            mounts: None,
+            state: None,
+        };
+
+        let remote_user =
+            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();
+
+        assert_eq!(remote_user, "root".to_string())
+    }
+
+    #[gpui::test]
+    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
+        let (_, devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
+        let mut metadata = HashMap::new();
+        metadata.insert(
+            "remoteUser".to_string(),
+            serde_json_lenient::Value::String("vsCode".to_string()),
+        );
+        let given_docker_config = DockerInspect {
+            id: "docker_id".to_string(),
+            config: DockerInspectConfig {
+                labels: DockerConfigLabels {
+                    metadata: Some(vec![metadata]),
+                },
+                image_user: None,
+                env: Vec::new(),
+            },
+            mounts: None,
+            state: None,
+        };
+
+        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);
+
+        assert!(remote_user.is_ok());
+        let remote_user = remote_user.expect("ok");
+        assert_eq!(&remote_user, "vsCode")
+    }
+
+    #[test]
+    fn should_extract_feature_id_from_references() {
+        assert_eq!(
+            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
+            "aws-cli"
+        );
+        assert_eq!(
+            extract_feature_id("ghcr.io/devcontainers/features/go"),
+            "go"
+        );
+        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
+        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
+        assert_eq!(
+            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
+            "rust"
+        );
+    }
+
+    #[gpui::test]
+    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
+        let mut metadata = HashMap::new();
+        metadata.insert(
+            "remoteUser".to_string(),
+            serde_json_lenient::Value::String("vsCode".to_string()),
+        );
+
+        let (_, devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
+        let build_resources = DockerBuildResources {
+            image: DockerInspect {
+                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
+                config: DockerInspectConfig {
+                    labels: DockerConfigLabels { metadata: None },
+                    image_user: None,
+                    env: Vec::new(),
+                },
+                mounts: None,
+                state: None,
+            },
+            additional_mounts: vec![],
+            privileged: false,
+            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
+        };
+        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
+
+        assert!(docker_run_command.is_ok());
+        let docker_run_command = docker_run_command.expect("ok");
+
+        assert_eq!(docker_run_command.get_program(), "docker");
+        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
+            .join(".devcontainer")
+            .join("devcontainer.json");
+        let expected_config_file_label = expected_config_file_label.display();
+        assert_eq!(
+            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
+            vec![
+                OsStr::new("run"),
+                OsStr::new("--sig-proxy=false"),
+                OsStr::new("-d"),
+                OsStr::new("--mount"),
+                OsStr::new(
+                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
+                ),
+                OsStr::new("-l"),
+                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
+                OsStr::new("-l"),
+                OsStr::new(&format!(
+                    "devcontainer.config_file={expected_config_file_label}"
+                )),
+                OsStr::new("--entrypoint"),
+                OsStr::new("/bin/sh"),
+                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
+                OsStr::new("-c"),
+                OsStr::new(
+                    "
+    echo Container started
+    trap \"exit 0\" 15
+    exec \"$@\"
+    while sleep 1 & wait $!; do :; done
+                        "
+                    .trim()
+                ),
+                OsStr::new("-"),
+            ]
+        )
+    }
+
+    #[gpui::test]
+    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
+        // State where service not defined in dev container
+        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
+        let given_docker_compose_config = DockerComposeResources {
+            config: DockerComposeConfig {
+                name: Some("devcontainers".to_string()),
+                services: HashMap::new(),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+
+        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
+
+        assert!(bad_result.is_err());
+
+        // State where service defined in devcontainer, not found in DockerCompose config
+        let (_, given_dev_container) =
+            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
+                .await
+                .unwrap();
+        let given_docker_compose_config = DockerComposeResources {
+            config: DockerComposeConfig {
+                name: Some("devcontainers".to_string()),
+                services: HashMap::new(),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+
+        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
+
+        assert!(bad_result.is_err());
+        // State where service defined in devcontainer and in DockerCompose config
+
+        let (_, given_dev_container) =
+            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
+                .await
+                .unwrap();
+        let given_docker_compose_config = DockerComposeResources {
+            config: DockerComposeConfig {
+                name: Some("devcontainers".to_string()),
+                services: HashMap::from([(
+                    "found_service".to_string(),
+                    DockerComposeService {
+                        ..Default::default()
+                    },
+                )]),
+                ..Default::default()
+            },
+            ..Default::default()
+        };
+
+        let (service_name, _) =
+            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();
+
+        assert_eq!(service_name, "found_service".to_string());
+    }
+
+    #[gpui::test]
+    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
+        let fs = FakeFs::new(cx.executor());
+        // Manifest exercising every non-remote variable form; the surrounding
+        // JSONC comments must be tolerated by the lenient parser.
+        let manifest_source = r#"
+// These are some external comments. serde_lenient should handle them
+{
+    // These are some internal comments
+    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
+    "name": "myDevContainer-${devcontainerId}",
+    "remoteUser": "root",
+    "remoteEnv": {
+        "DEVCONTAINER_ID": "${devcontainerId}",
+        "MYVAR2": "myvarothervalue",
+        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
+        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
+        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
+        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
+        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
+        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"
+
+    }
+}
+                    "#;
+        // Host-side environment visible to ${localEnv:...} substitution.
+        let host_env = HashMap::from([
+            ("local_env_1".to_string(), "local_env_value1".to_string()),
+            ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
+        ]);
+        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
+            cx,
+            fs,
+            fake_http_client(),
+            Arc::new(FakeDocker::new()),
+            Arc::new(TestCommandRunner::new()),
+            host_env,
+            manifest_source,
+        )
+        .await
+        .unwrap();
+
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let parsed = match &devcontainer_manifest.config {
+            ConfigStatus::VariableParsed(parsed) => parsed,
+            _ => panic!("Config not parsed"),
+        };
+
+        // Shorthand for looking up a substituted remoteEnv entry.
+        let remote_env = |key: &str| parsed.remote_env.as_ref().and_then(|env| env.get(key));
+
+        // ${devcontainerId}
+        let devcontainer_id = devcontainer_manifest.devcontainer_id();
+        assert_eq!(
+            parsed.name,
+            Some(format!("myDevContainer-{devcontainer_id}"))
+        );
+        assert_eq!(remote_env("DEVCONTAINER_ID"), Some(&devcontainer_id));
+
+        // ${containerWorkspaceFolderBasename} / ${localWorkspaceFolderBasename}:
+        // with the default mount both resolve to the project file name.
+        assert_eq!(
+            remote_env("REMOTE_WORKSPACE_FOLDER_BASENAME"),
+            Some(&test_project_filename())
+        );
+        assert_eq!(
+            remote_env("LOCAL_WORKSPACE_FOLDER_BASENAME"),
+            Some(&test_project_filename())
+        );
+
+        // ${containerWorkspaceFolder}
+        assert_eq!(
+            remote_env("REMOTE_WORKSPACE_FOLDER"),
+            Some(&format!("/workspaces/{}", test_project_filename()))
+        );
+
+        // ${localWorkspaceFolder}
+        assert_eq!(
+            remote_env("LOCAL_WORKSPACE_FOLDER"),
+            Some(&TEST_PROJECT_PATH.to_string())
+        );
+
+        // ${localEnv:VARIABLE_NAME}
+        assert_eq!(
+            remote_env("LOCAL_ENV_VAR_1"),
+            Some(&"local_env_value1".to_string())
+        );
+        assert_eq!(
+            remote_env("LOCAL_ENV_VAR_2"),
+            Some(&"THISVALUEHERE".to_string())
+        );
+    }
+
+    #[gpui::test]
+    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
+        // An explicit workspaceMount/workspaceFolder overrides the defaults, so the
+        // container-side variables must resolve against the custom folder.
+        let manifest_source = r#"
+                // These are some external comments. serde_lenient should handle them
+                {
+                    // These are some internal comments
+                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
+                    "name": "myDevContainer-${devcontainerId}",
+                    "remoteUser": "root",
+                    "remoteEnv": {
+                        "DEVCONTAINER_ID": "${devcontainerId}",
+                        "MYVAR2": "myvarothervalue",
+                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
+                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
+                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
+                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"
+
+                    },
+                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
+                    "workspaceFolder": "/workspace/customfolder"
+                }
+            "#;
+
+        let (_, mut devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, manifest_source)
+                .await
+                .unwrap();
+
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let parsed = match &devcontainer_manifest.config {
+            ConfigStatus::VariableParsed(parsed) => parsed,
+            _ => panic!("Config not parsed"),
+        };
+
+        // Shorthand for looking up a substituted remoteEnv entry.
+        let remote_env = |key: &str| parsed.remote_env.as_ref().and_then(|env| env.get(key));
+
+        // ${devcontainerId}
+        let devcontainer_id = devcontainer_manifest.devcontainer_id();
+        assert_eq!(
+            parsed.name,
+            Some(format!("myDevContainer-{devcontainer_id}"))
+        );
+        assert_eq!(remote_env("DEVCONTAINER_ID"), Some(&devcontainer_id));
+
+        // ${containerWorkspaceFolderBasename}: basename of the custom workspaceFolder.
+        assert_eq!(
+            remote_env("REMOTE_WORKSPACE_FOLDER_BASENAME"),
+            Some(&"customfolder".to_string())
+        );
+
+        // ${localWorkspaceFolderBasename}
+        assert_eq!(
+            remote_env("LOCAL_WORKSPACE_FOLDER_BASENAME"),
+            Some(&"project".to_string())
+        );
+
+        // ${containerWorkspaceFolder}
+        assert_eq!(
+            remote_env("REMOTE_WORKSPACE_FOLDER"),
+            Some(&"/workspace/customfolder".to_string())
+        );
+
+        // ${localWorkspaceFolder}
+        assert_eq!(
+            remote_env("LOCAL_WORKSPACE_FOLDER"),
+            Some(&TEST_PROJECT_PATH.to_string())
+        );
+    }
+
+    // updateRemoteUserUID is treated as false in Windows, so this test will fail
+    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
+    #[cfg(not(target_os = "windows"))]
+    #[gpui::test]
+    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
+        cx.executor().allow_parking();
+        env_logger::try_init().ok();
+        // End-to-end fixture: a devcontainer.json exercising build args, an explicit
+        // workspace mount, extra mounts, port forwarding (forwardPorts + appPort),
+        // lifecycle commands, remoteEnv, two features, and customizations
+        // (including the zed extensions block).
+        let given_devcontainer_contents = r#"
+            /*---------------------------------------------------------------------------------------------
+             *  Copyright (c) Microsoft Corporation. All rights reserved.
+             *  Licensed under the MIT License. See License.txt in the project root for license information.
+             *--------------------------------------------------------------------------------------------*/
+            {
+              "name": "cli-${devcontainerId}",
+              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
+              "build": {
+                "dockerfile": "Dockerfile",
+                "args": {
+                  "VARIANT": "18-bookworm",
+                  "FOO": "bar",
+                },
+              },
+              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
+              "workspaceFolder": "/workspace2",
+              "mounts": [
+                // Keep command history across instances
+                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
+              ],
+
+              "forwardPorts": [
+                8082,
+                8083,
+              ],
+              "appPort": "8084",
+
+              "containerEnv": {
+                "VARIABLE_VALUE": "value",
+              },
+
+              "initializeCommand": "touch IAM.md",
+
+              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
+
+              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
+
+              "postCreateCommand": {
+                "yarn": "yarn install",
+                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
+              },
+
+              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
+
+              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
+
+              "remoteUser": "node",
+
+              "remoteEnv": {
+                "PATH": "${containerEnv:PATH}:/some/other/path",
+                "OTHER_ENV": "other_env_value"
+              },
+
+              "features": {
+                "ghcr.io/devcontainers/features/docker-in-docker:2": {
+                  "moby": false,
+                },
+                "ghcr.io/devcontainers/features/go:1": {},
+              },
+
+              "customizations": {
+                "vscode": {
+                  "extensions": [
+                    "dbaeumer.vscode-eslint",
+                    "GitHub.vscode-pull-request-github",
+                  ],
+                },
+                "zed": {
+                  "extensions": ["vue", "ruby"],
+                },
+                "codespaces": {
+                  "repositories": {
+                    "devcontainers/features": {
+                      "permissions": {
+                        "contents": "write",
+                        "workflows": "write",
+                      },
+                    },
+                  },
+                },
+              },
+            }
+            "#;
+
+        let (test_dependencies, mut devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
+                .await
+                .unwrap();
+
+        // Provide the Dockerfile referenced by the manifest's `build` block.
+        test_dependencies
+            .fs
+            .atomic_write(
+                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
+                r#"
+#  Copyright (c) Microsoft Corporation. All rights reserved.
+#  Licensed under the MIT License. See License.txt in the project root for license information.
+ARG VARIANT="16-bullseye"
+FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
+
+RUN mkdir -p /workspaces && chown node:node /workspaces
+
+ARG USERNAME=node
+USER $USERNAME
+
+# Save command line history
+RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
+&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
+&& mkdir -p /home/$USERNAME/commandhistory \
+&& touch /home/$USERNAME/commandhistory/.bash_history \
+&& chown -R $USERNAME /home/$USERNAME/commandhistory
+                    "#.trim().to_string(),
+            )
+            .await
+            .unwrap();
+
+        // Resolve ${...} variables, then build and start the container via the fakes.
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
+
+        // Zed extension ids from the `customizations.zed` block are surfaced to the caller.
+        assert_eq!(
+            devcontainer_up.extension_ids,
+            vec!["vue".to_string(), "ruby".to_string()]
+        );
+
+        // The generated Dockerfile.extended must wrap the user Dockerfile with the
+        // feature-content normalize stage and one install RUN per configured feature.
+        let files = test_dependencies.fs.files();
+        let feature_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
+            })
+            .expect("to be found");
+        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
+        assert_eq!(
+            &feature_dockerfile,
+            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
+
+#  Copyright (c) Microsoft Corporation. All rights reserved.
+#  Licensed under the MIT License. See License.txt in the project root for license information.
+ARG VARIANT="16-bullseye"
+FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
+
+RUN mkdir -p /workspaces && chown node:node /workspaces
+
+ARG USERNAME=node
+USER $USERNAME
+
+# Save command line history
+RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
+&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
+&& mkdir -p /home/$USERNAME/commandhistory \
+&& touch /home/$USERNAME/commandhistory/.bash_history \
+&& chown -R $USERNAME /home/$USERNAME/commandhistory
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
+USER root
+COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
+RUN chmod -R 0755 /tmp/build-features/
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
+
+USER root
+
+RUN mkdir -p /tmp/dev-container-features
+COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
+
+RUN \
+echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
+echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
+
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
+cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
+&& cd /tmp/dev-container-features/docker-in-docker_0 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/docker-in-docker_0
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
+cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/go_1 \
+&& cd /tmp/dev-container-features/go_1 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/go_1
+
+
+ARG _DEV_CONTAINERS_IMAGE_USER=root
+USER $_DEV_CONTAINERS_IMAGE_USER
+"#
+        );
+
+        // updateUID.Dockerfile rewrites the remote user's UID/GID to match the host
+        // user and re-exports the container ENV (including feature-contributed vars).
+        let uid_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
+            })
+            .expect("to be found");
+        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
+
+        assert_eq!(
+            &uid_dockerfile,
+            r#"ARG BASE_IMAGE
+FROM $BASE_IMAGE
+
+USER root
+
+ARG REMOTE_USER
+ARG NEW_UID
+ARG NEW_GID
+SHELL ["/bin/sh", "-c"]
+RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
+	if [ -z "$OLD_UID" ]; then \
+		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
+	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
+		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
+	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
+		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
+	else \
+		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
+			FREE_GID=65532; \
+			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
+			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
+			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
+		fi; \
+		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
+		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
+		if [ "$OLD_GID" != "$NEW_GID" ]; then \
+			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
+		fi; \
+		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
+	fi;
+
+ARG IMAGE_USER
+USER $IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+
+ENV DOCKER_BUILDKIT=1
+
+ENV GOPATH=/go
+ENV GOROOT=/usr/local/go
+ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
+ENV VARIABLE_VALUE=value
+"#
+        );
+
+        // Each feature gets a generated install wrapper; spot-check the go feature's
+        // script (banner, options, env sourcing, then the feature's install.sh).
+        let golang_install_wrapper = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
+                    && f.to_str().is_some_and(|s| s.contains("/go_"))
+            })
+            .expect("to be found");
+        let golang_install_wrapper = test_dependencies
+            .fs
+            .load(golang_install_wrapper)
+            .await
+            .unwrap();
+        assert_eq!(
+            &golang_install_wrapper,
+            r#"#!/bin/sh
+set -e
+
+on_exit () {
+    [ $? -eq 0 ] && exit
+    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
+}
+
+trap on_exit EXIT
+
+echo ===========================================================================
+echo 'Feature       : go'
+echo 'Id            : ghcr.io/devcontainers/features/go:1'
+echo 'Options       :'
+echo '    GOLANGCILINTVERSION=latest
+    VERSION=latest'
+echo ===========================================================================
+
+set -a
+. ../devcontainer-features.builtin.env
+. ./devcontainer-features.env
+set +a
+
+chmod +x ./install.sh
+./install.sh
+"#
+        );
+
+        // The recorded `docker run` must include the workspace, custom, and
+        // feature-contributed mounts, the devcontainer labels, every forwarded
+        // port (forwardPorts plus appPort), and the keep-alive entrypoint.
+        let docker_commands = test_dependencies
+            .command_runner
+            .commands_by_program("docker");
+
+        let docker_run_command = docker_commands
+            .iter()
+            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
+            .expect("found");
+
+        assert_eq!(
+            docker_run_command.args,
+            vec![
+                "run".to_string(),
+                "--privileged".to_string(),
+                "--sig-proxy=false".to_string(),
+                "-d".to_string(),
+                "--mount".to_string(),
+                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
+                "--mount".to_string(),
+                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
+                "--mount".to_string(),
+                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
+                "-l".to_string(),
+                "devcontainer.local_folder=/path/to/local/project".to_string(),
+                "-l".to_string(),
+                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
+                "-l".to_string(),
+                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
+                "-p".to_string(),
+                "8082:8082".to_string(),
+                "-p".to_string(),
+                "8083:8083".to_string(),
+                "-p".to_string(),
+                "8084:8084".to_string(),
+                "--entrypoint".to_string(),
+                "/bin/sh".to_string(),
+                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
+                "-c".to_string(),
+                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
+                "-".to_string()
+            ]
+        );
+
+        // Every exec into the container must carry the substituted remoteEnv,
+        // including the ${containerEnv:PATH} expansion against the container's
+        // initial PATH.
+        let docker_exec_commands = test_dependencies
+            .docker
+            .exec_commands_recorded
+            .lock()
+            .unwrap();
+
+        assert!(docker_exec_commands.iter().all(|exec| {
+            exec.env
+                == HashMap::from([
+                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
+                    (
+                        "PATH".to_string(),
+                        "/initial/path:/some/other/path".to_string(),
+                    ),
+                ])
+        }))
+    }
+
+    // updateRemoteUserUID is treated as false in Windows, so this test will fail
+    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
+    #[cfg(not(target_os = "windows"))]
+    #[gpui::test]
+    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
+        cx.executor().allow_parking();
+        env_logger::try_init().ok();
+        // Compose-based manifest: primary service `app` runs with
+        // `network_mode: service:db`, so forwarded ports (including appPort and
+        // the `db:PORT` forms) should be published on the `db` service.
+        let given_devcontainer_contents = r#"
+            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
+            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
+            {
+              "features": {
+                "ghcr.io/devcontainers/features/aws-cli:1": {},
+                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
+              },
+              "name": "Rust and PostgreSQL",
+              "dockerComposeFile": "docker-compose.yml",
+              "service": "app",
+              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
+
+              // Features to add to the dev container. More info: https://containers.dev/features.
+              // "features": {},
+
+              // Use 'forwardPorts' to make a list of ports inside the container available locally.
+              "forwardPorts": [
+                8083,
+                "db:5432",
+                "db:1234",
+              ],
+              "appPort": "8084",
+
+              // Use 'postCreateCommand' to run commands after the container is created.
+              // "postCreateCommand": "rustc --version",
+
+              // Configure tool-specific properties.
+              // "customizations": {},
+
+              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+              // "remoteUser": "root"
+            }
+            "#;
+        let (test_dependencies, mut devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
+                .await
+                .unwrap();
+
+        // Compose file referenced by the manifest's `dockerComposeFile`.
+        test_dependencies
+            .fs
+            .atomic_write(
+                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
+                r#"
+version: '3.8'
+
+volumes:
+    postgres-data:
+
+services:
+    app:
+        build:
+            context: .
+            dockerfile: Dockerfile
+        env_file:
+            # Ensure that the variables in .env match the same variables in devcontainer.json
+            - .env
+
+        volumes:
+            - ../..:/workspaces:cached
+
+        # Overrides default command so things don't shut down after the process ends.
+        command: sleep infinity
+
+        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
+        network_mode: service:db
+
+        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
+        # (Adding the "ports" property to this file will not forward from a Codespace.)
+
+    db:
+        image: postgres:14.1
+        restart: unless-stopped
+        volumes:
+            - postgres-data:/var/lib/postgresql/data
+        env_file:
+            # Ensure that the variables in .env match the same variables in devcontainer.json
+            - .env
+
+        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
+        # (Adding the "ports" property to this file will not forward from a Codespace.)
+                    "#.trim().to_string(),
+            )
+            .await
+            .unwrap();
+
+        // Dockerfile used by the `app` service's build block.
+        test_dependencies.fs.atomic_write(
+            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
+            r#"
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+    && apt-get -y install clang lld \
+    && apt-get autoremove -y && apt-get clean -y
+            "#.trim().to_string()).await.unwrap();
+
+        // Resolve ${...} variables, then build and start the stack via the fakes.
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
+
+        // Dockerfile.extended must wrap the service Dockerfile with the
+        // feature-content normalize stage and one install RUN per feature.
+        let files = test_dependencies.fs.files();
+        let feature_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
+            })
+            .expect("to be found");
+        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
+        assert_eq!(
+            &feature_dockerfile,
+            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
+
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+    && apt-get -y install clang lld \
+    && apt-get autoremove -y && apt-get clean -y
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
+USER root
+COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
+RUN chmod -R 0755 /tmp/build-features/
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
+
+USER root
+
+RUN mkdir -p /tmp/dev-container-features
+COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
+
+RUN \
+echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
+echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
+
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
+cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
+&& cd /tmp/dev-container-features/aws-cli_0 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/aws-cli_0
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
+cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
+&& cd /tmp/dev-container-features/docker-in-docker_1 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/docker-in-docker_1
+
+
+ARG _DEV_CONTAINERS_IMAGE_USER=root
+USER $_DEV_CONTAINERS_IMAGE_USER
+"#
+        );
+
+        // updateUID.Dockerfile rewrites the remote user's UID/GID to match the host user.
+        let uid_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
+            })
+            .expect("to be found");
+        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
+
+        assert_eq!(
+            &uid_dockerfile,
+            r#"ARG BASE_IMAGE
+FROM $BASE_IMAGE
+
+USER root
+
+ARG REMOTE_USER
+ARG NEW_UID
+ARG NEW_GID
+SHELL ["/bin/sh", "-c"]
+RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
+	if [ -z "$OLD_UID" ]; then \
+		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
+	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
+		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
+	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
+		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
+	else \
+		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
+			FREE_GID=65532; \
+			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
+			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
+			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
+		fi; \
+		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
+		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
+		if [ "$OLD_GID" != "$NEW_GID" ]; then \
+			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
+		fi; \
+		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
+	fi;
+
+ARG IMAGE_USER
+USER $IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+
+
+ENV DOCKER_BUILDKIT=1
+"#
+        );
+
+        // The runtime compose override should inject the keep-alive entrypoint,
+        // capabilities, labels, and feature volume on `app`, while publishing all
+        // forwarded ports on `db` (the service whose network `app` joins).
+        let runtime_override = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
+            })
+            .expect("to be found");
+        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();
+
+        let expected_runtime_override = DockerComposeConfig {
+            name: None,
+            services: HashMap::from([
+                (
+                    "app".to_string(),
+                    DockerComposeService {
+                        entrypoint: Some(vec![
+                            "/bin/sh".to_string(),
+                            "-c".to_string(),
+                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
+                            "-".to_string(),
+                        ]),
+                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
+                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
+                        privileged: Some(true),
+                        labels: Some(vec![
+                            "devcontainer.metadata=[{\"remoteUser\":\"vscode\"}]".to_string(),
+                            "devcontainer.local_folder=/path/to/local/project".to_string(),
+                            "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string()
+                        ]),
+                        volumes: vec![
+                            MountDefinition {
+                                source: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
+                                target: "/var/lib/docker".to_string(),
+                                mount_type: Some("volume".to_string())
+                            }
+                        ],
+                        ..Default::default()
+                    },
+                ),
+                (
+                    "db".to_string(),
+                    DockerComposeService {
+                        ports: vec![
+                            "8083:8083".to_string(),
+                            "5432:5432".to_string(),
+                            "1234:1234".to_string(),
+                            "8084:8084".to_string()
+                        ],
+                        ..Default::default()
+                    },
+                ),
+            ]),
+            volumes: HashMap::from([(
+                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
+                DockerComposeVolume {
+                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
+                },
+            )]),
+        };
+
+        assert_eq!(
+            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
+            expected_runtime_override
+        )
+    }
+
+    /// Compose-based devcontainer with `"updateRemoteUserUID": false`:
+    /// builds the manifest and asserts the generated `Dockerfile.extended`
+    /// contains the feature-install stages but no UID/GID remap stage
+    /// (the podman test below exercises the UID-remap output).
+    #[gpui::test]
+    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
+        cx: &mut TestAppContext,
+    ) {
+        cx.executor().allow_parking();
+        env_logger::try_init().ok();
+        // devcontainer.json fixture: JSONC (comments + trailing commas), two
+        // features, compose service "app", forwardPorts mixing a bare port
+        // with "service:port" entries, an appPort, and UID updates disabled.
+        let given_devcontainer_contents = r#"
+        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
+        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
+        {
+          "features": {
+            "ghcr.io/devcontainers/features/aws-cli:1": {},
+            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
+          },
+          "name": "Rust and PostgreSQL",
+          "dockerComposeFile": "docker-compose.yml",
+          "service": "app",
+          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
+
+          // Features to add to the dev container. More info: https://containers.dev/features.
+          // "features": {},
+
+          // Use 'forwardPorts' to make a list of ports inside the container available locally.
+          "forwardPorts": [
+            8083,
+            "db:5432",
+            "db:1234",
+          ],
+          "updateRemoteUserUID": false,
+          "appPort": "8084",
+
+          // Use 'postCreateCommand' to run commands after the container is created.
+          // "postCreateCommand": "rustc --version",
+
+          // Configure tool-specific properties.
+          // "customizations": {},
+
+          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+          // "remoteUser": "root"
+        }
+        "#;
+        let (test_dependencies, mut devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
+                .await
+                .unwrap();
+
+        // Write the compose file the fixture references into the fake fs.
+        // NOTE(review): the YAML below has flattened indentation (service and
+        // volume keys sit at the same column as their parents) — presumably
+        // harmless because only the fake Docker client ever consumes it;
+        // confirm it is never parsed as real YAML.
+        test_dependencies
+        .fs
+        .atomic_write(
+            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
+            r#"
+version: '3.8'
+
+volumes:
+postgres-data:
+
+services:
+app:
+    build:
+        context: .
+        dockerfile: Dockerfile
+    env_file:
+        # Ensure that the variables in .env match the same variables in devcontainer.json
+        - .env
+
+    volumes:
+        - ../..:/workspaces:cached
+
+    # Overrides default command so things don't shut down after the process ends.
+    command: sleep infinity
+
+    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
+    network_mode: service:db
+
+    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
+    # (Adding the "ports" property to this file will not forward from a Codespace.)
+
+db:
+    image: postgres:14.1
+    restart: unless-stopped
+    volumes:
+        - postgres-data:/var/lib/postgresql/data
+    env_file:
+        # Ensure that the variables in .env match the same variables in devcontainer.json
+        - .env
+
+    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
+    # (Adding the "ports" property to this file will not forward from a Codespace.)
+                "#.trim().to_string(),
+        )
+        .await
+        .unwrap();
+
+        // Dockerfile referenced by the compose "app" service's build block.
+        test_dependencies.fs.atomic_write(
+        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
+        r#"
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+&& apt-get -y install clang lld \
+&& apt-get autoremove -y && apt-get clean -y
+        "#.trim().to_string()).await.unwrap();
+
+        // Resolve non-remote variables (e.g. ${localWorkspaceFolderBasename})
+        // before building.
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
+
+        // The build step writes an extended Dockerfile that wraps the user's
+        // Dockerfile with the feature-install stages.
+        let files = test_dependencies.fs.files();
+        let feature_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
+            })
+            .expect("to be found");
+        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
+        // Golden content: with updateRemoteUserUID=false there must be no
+        // UID-remap stage appended after the feature-install stages.
+        assert_eq!(
+            &feature_dockerfile,
+            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
+
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+&& apt-get -y install clang lld \
+&& apt-get autoremove -y && apt-get clean -y
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
+USER root
+COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
+RUN chmod -R 0755 /tmp/build-features/
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
+
+USER root
+
+RUN mkdir -p /tmp/dev-container-features
+COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
+
+RUN \
+echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
+echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
+
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
+cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
+&& cd /tmp/dev-container-features/aws-cli_0 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/aws-cli_0
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
+cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
+&& cd /tmp/dev-container-features/docker-in-docker_1 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/docker-in-docker_1
+
+
+ARG _DEV_CONTAINERS_IMAGE_USER=root
+USER $_DEV_CONTAINERS_IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+
+
+ENV DOCKER_BUILDKIT=1
+"#
+        );
+    }
+
+    /// Same compose-style fixture but with the fake Docker reporting podman:
+    /// the golden `Dockerfile.extended` stages feature content via plain
+    /// `COPY --from` of a temp image (no `RUN --mount`, no
+    /// `ENV DOCKER_BUILDKIT=1`), and — since this fixture does not set
+    /// `updateRemoteUserUID: false` — a separate `updateUID.Dockerfile`
+    /// is generated and asserted.
+    #[cfg(not(target_os = "windows"))]
+    #[gpui::test]
+    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
+        cx.executor().allow_parking();
+        env_logger::try_init().ok();
+        let given_devcontainer_contents = r#"
+        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
+        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
+        {
+          "features": {
+            "ghcr.io/devcontainers/features/aws-cli:1": {},
+            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
+          },
+          "name": "Rust and PostgreSQL",
+          "dockerComposeFile": "docker-compose.yml",
+          "service": "app",
+          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
+
+          // Features to add to the dev container. More info: https://containers.dev/features.
+          // "features": {},
+
+          // Use 'forwardPorts' to make a list of ports inside the container available locally.
+          // "forwardPorts": [5432],
+
+          // Use 'postCreateCommand' to run commands after the container is created.
+          // "postCreateCommand": "rustc --version",
+
+          // Configure tool-specific properties.
+          // "customizations": {},
+
+          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+          // "remoteUser": "root"
+        }
+        "#;
+        // Manifest is initialized manually (rather than via the default
+        // helper) so the fake Docker can be flipped into podman mode first.
+        let mut fake_docker = FakeDocker::new();
+        fake_docker.set_podman(true);
+        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
+            cx,
+            FakeFs::new(cx.executor()),
+            fake_http_client(),
+            Arc::new(fake_docker),
+            Arc::new(TestCommandRunner::new()),
+            HashMap::new(),
+            given_devcontainer_contents,
+        )
+        .await
+        .unwrap();
+
+        // Compose file referenced by the fixture.
+        // NOTE(review): indentation is flattened here too (all keys at the
+        // same column) — presumably only consumed by the fakes; confirm.
+        test_dependencies
+        .fs
+        .atomic_write(
+            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
+            r#"
+version: '3.8'
+
+volumes:
+postgres-data:
+
+services:
+app:
+build:
+    context: .
+    dockerfile: Dockerfile
+env_file:
+    # Ensure that the variables in .env match the same variables in devcontainer.json
+    - .env
+
+volumes:
+    - ../..:/workspaces:cached
+
+# Overrides default command so things don't shut down after the process ends.
+command: sleep infinity
+
+# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
+network_mode: service:db
+
+# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
+# (Adding the "ports" property to this file will not forward from a Codespace.)
+
+db:
+image: postgres:14.1
+restart: unless-stopped
+volumes:
+    - postgres-data:/var/lib/postgresql/data
+env_file:
+    # Ensure that the variables in .env match the same variables in devcontainer.json
+    - .env
+
+# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
+# (Adding the "ports" property to this file will not forward from a Codespace.)
+                "#.trim().to_string(),
+        )
+        .await
+        .unwrap();
+
+        // Dockerfile referenced by the compose "app" service's build block.
+        test_dependencies.fs.atomic_write(
+        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
+        r#"
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+&& apt-get -y install clang lld \
+&& apt-get autoremove -y && apt-get clean -y
+        "#.trim().to_string()).await.unwrap();
+
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
+
+        let files = test_dependencies.fs.files();
+
+        // Golden extended Dockerfile for the podman path: feature content is
+        // pulled in through an extra `dev_containers_feature_content_source`
+        // stage and `COPY --chown` instead of BuildKit `RUN --mount` binds.
+        let feature_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
+            })
+            .expect("to be found");
+        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
+        assert_eq!(
+            &feature_dockerfile,
+            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
+
+FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
+
+# Include lld linker to improve build times either by using environment variable
+# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
+RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
+&& apt-get -y install clang lld \
+&& apt-get autoremove -y && apt-get clean -y
+
+FROM dev_container_feature_content_temp as dev_containers_feature_content_source
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
+USER root
+COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
+RUN chmod -R 0755 /tmp/build-features/
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
+
+USER root
+
+RUN mkdir -p /tmp/dev-container-features
+COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
+
+RUN \
+echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
+echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
+
+
+COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
+RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
+&& cd /tmp/dev-container-features/aws-cli_0 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh
+
+COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
+RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
+&& cd /tmp/dev-container-features/docker-in-docker_1 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh
+
+
+ARG _DEV_CONTAINERS_IMAGE_USER=root
+USER $_DEV_CONTAINERS_IMAGE_USER
+"#
+        );
+
+        // UID updates are enabled by default, so a remap Dockerfile is also
+        // generated; its shell logic reassigns a conflicting GID and rewrites
+        // /etc/passwd//etc/group for the remote user.
+        let uid_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
+            })
+            .expect("to be found");
+        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
+
+        assert_eq!(
+            &uid_dockerfile,
+            r#"ARG BASE_IMAGE
+FROM $BASE_IMAGE
+
+USER root
+
+ARG REMOTE_USER
+ARG NEW_UID
+ARG NEW_GID
+SHELL ["/bin/sh", "-c"]
+RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
+	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
+	if [ -z "$OLD_UID" ]; then \
+		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
+	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
+		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
+	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
+		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
+	else \
+		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
+			FREE_GID=65532; \
+			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
+			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
+			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
+		fi; \
+		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
+		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
+		if [ "$OLD_GID" != "$NEW_GID" ]; then \
+			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
+		fi; \
+		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
+	fi;
+
+ARG IMAGE_USER
+USER $IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+
+
+ENV DOCKER_BUILDKIT=1
+"#
+        );
+    }
+
+    /// Dockerfile-based fixture exercising build args, a custom workspace
+    /// mount, lifecycle commands, containerEnv/remoteEnv and customizations.
+    /// Asserts: the zed `customizations` extension ids are surfaced, the
+    /// golden `Dockerfile.extended` (including feature ENV and containerEnv
+    /// lines), the per-feature install wrapper script, that a `docker run`
+    /// was issued, and that exec env merges remoteEnv with the container's
+    /// recorded PATH.
+    #[gpui::test]
+    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
+        cx.executor().allow_parking();
+        env_logger::try_init().ok();
+        let given_devcontainer_contents = r#"
+            /*---------------------------------------------------------------------------------------------
+             *  Copyright (c) Microsoft Corporation. All rights reserved.
+             *  Licensed under the MIT License. See License.txt in the project root for license information.
+             *--------------------------------------------------------------------------------------------*/
+            {
+              "name": "cli-${devcontainerId}",
+              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
+              "build": {
+                "dockerfile": "Dockerfile",
+                "args": {
+                  "VARIANT": "18-bookworm",
+                  "FOO": "bar",
+                },
+              },
+              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
+              "workspaceFolder": "/workspace2",
+              "mounts": [
+                // Keep command history across instances
+                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
+              ],
+
+              "forwardPorts": [
+                8082,
+                8083,
+              ],
+              "appPort": "8084",
+              "updateRemoteUserUID": false,
+
+              "containerEnv": {
+                "VARIABLE_VALUE": "value",
+              },
+
+              "initializeCommand": "touch IAM.md",
+
+              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
+
+              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
+
+              "postCreateCommand": {
+                "yarn": "yarn install",
+                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
+              },
+
+              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
+
+              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
+
+              "remoteUser": "node",
+
+              "remoteEnv": {
+                "PATH": "${containerEnv:PATH}:/some/other/path",
+                "OTHER_ENV": "other_env_value"
+              },
+
+              "features": {
+                "ghcr.io/devcontainers/features/docker-in-docker:2": {
+                  "moby": false,
+                },
+                "ghcr.io/devcontainers/features/go:1": {},
+              },
+
+              "customizations": {
+                "vscode": {
+                  "extensions": [
+                    "dbaeumer.vscode-eslint",
+                    "GitHub.vscode-pull-request-github",
+                  ],
+                },
+                "zed": {
+                  "extensions": ["vue", "ruby"],
+                },
+                "codespaces": {
+                  "repositories": {
+                    "devcontainers/features": {
+                      "permissions": {
+                        "contents": "write",
+                        "workflows": "write",
+                      },
+                    },
+                  },
+                },
+              },
+            }
+            "#;
+
+        let (test_dependencies, mut devcontainer_manifest) =
+            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
+                .await
+                .unwrap();
+
+        // Dockerfile referenced by the fixture's "build" block.
+        test_dependencies
+            .fs
+            .atomic_write(
+                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
+                r#"
+#  Copyright (c) Microsoft Corporation. All rights reserved.
+#  Licensed under the MIT License. See License.txt in the project root for license information.
+ARG VARIANT="16-bullseye"
+FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
+
+RUN mkdir -p /workspaces && chown node:node /workspaces
+
+ARG USERNAME=node
+USER $USERNAME
+
+# Save command line history
+RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
+&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
+&& mkdir -p /home/$USERNAME/commandhistory \
+&& touch /home/$USERNAME/commandhistory/.bash_history \
+&& chown -R $USERNAME /home/$USERNAME/commandhistory
+                    "#.trim().to_string(),
+            )
+            .await
+            .unwrap();
+
+        devcontainer_manifest.parse_nonremote_vars().unwrap();
+
+        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
+
+        // The "zed" customizations block is surfaced as extension ids for
+        // installation into the containerized Zed server.
+        assert_eq!(
+            devcontainer_up.extension_ids,
+            vec!["vue".to_string(), "ruby".to_string()]
+        );
+
+        // Golden extended Dockerfile: feature stages plus trailing ENV lines
+        // contributed by the go feature and by "containerEnv".
+        let files = test_dependencies.fs.files();
+        let feature_dockerfile = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
+            })
+            .expect("to be found");
+        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
+        assert_eq!(
+            &feature_dockerfile,
+            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
+
+#  Copyright (c) Microsoft Corporation. All rights reserved.
+#  Licensed under the MIT License. See License.txt in the project root for license information.
+ARG VARIANT="16-bullseye"
+FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
+
+RUN mkdir -p /workspaces && chown node:node /workspaces
+
+ARG USERNAME=node
+USER $USERNAME
+
+# Save command line history
+RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
+&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
+&& mkdir -p /home/$USERNAME/commandhistory \
+&& touch /home/$USERNAME/commandhistory/.bash_history \
+&& chown -R $USERNAME /home/$USERNAME/commandhistory
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
+USER root
+COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
+RUN chmod -R 0755 /tmp/build-features/
+
+FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
+
+USER root
+
+RUN mkdir -p /tmp/dev-container-features
+COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
+
+RUN \
+echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
+echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
+
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
+cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
+&& cd /tmp/dev-container-features/docker-in-docker_0 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/docker-in-docker_0
+
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
+cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
+&& chmod -R 0755 /tmp/dev-container-features/go_1 \
+&& cd /tmp/dev-container-features/go_1 \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf /tmp/dev-container-features/go_1
+
+
+ARG _DEV_CONTAINERS_IMAGE_USER=root
+USER $_DEV_CONTAINERS_IMAGE_USER
+
+# Ensure that /etc/profile does not clobber the existing path
+RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
+
+ENV DOCKER_BUILDKIT=1
+
+ENV GOPATH=/go
+ENV GOROOT=/usr/local/go
+ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
+ENV VARIABLE_VALUE=value
+"#
+        );
+
+        // The go feature also gets a generated install wrapper script; match
+        // it by filename plus a "go_" path component to distinguish it from
+        // the docker-in-docker feature's wrapper.
+        let golang_install_wrapper = files
+            .iter()
+            .find(|f| {
+                f.file_name()
+                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
+                    && f.to_str().is_some_and(|s| s.contains("go_"))
+            })
+            .expect("to be found");
+        let golang_install_wrapper = test_dependencies
+            .fs
+            .load(golang_install_wrapper)
+            .await
+            .unwrap();
+        assert_eq!(
+            &golang_install_wrapper,
+            r#"#!/bin/sh
+set -e
+
+on_exit () {
+    [ $? -eq 0 ] && exit
+    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
+}
+
+trap on_exit EXIT
+
+echo ===========================================================================
+echo 'Feature       : go'
+echo 'Id            : ghcr.io/devcontainers/features/go:1'
+echo 'Options       :'
+echo '    GOLANGCILINTVERSION=latest
+    VERSION=latest'
+echo ===========================================================================
+
+set -a
+. ../devcontainer-features.builtin.env
+. ./devcontainer-features.env
+set +a
+
+chmod +x ./install.sh
+./install.sh
+"#
+        );
+
+        // The container must actually have been started via `docker run`.
+        let docker_commands = test_dependencies
+            .command_runner
+            .commands_by_program("docker");
+
+        let docker_run_command = docker_commands
+            .iter()
+            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
+
+        assert!(docker_run_command.is_some());
+
+        // Every recorded exec must carry remoteEnv with ${containerEnv:PATH}
+        // expanded from the fake container's recorded "/initial/path".
+        // NOTE(review): `all` is vacuously true on an empty Vec — consider
+        // also asserting `!docker_exec_commands.is_empty()`.
+        let docker_exec_commands = test_dependencies
+            .docker
+            .exec_commands_recorded
+            .lock()
+            .unwrap();
+
+        assert!(docker_exec_commands.iter().all(|exec| {
+            exec.env
+                == HashMap::from([
+                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
+                    (
+                        "PATH".to_string(),
+                        "/initial/path:/some/other/path".to_string(),
+                    ),
+                ])
+        }))
+    }
+
+    /// One exec invocation captured by the fake Docker client; the
+    /// underscore-prefixed fields are recorded but not asserted on (yet).
+    pub(crate) struct RecordedExecCommand {
+        pub(crate) _container_id: String,
+        pub(crate) _remote_folder: String,
+        pub(crate) _user: String,
+        // Environment the exec was launched with; asserted by the remoteEnv
+        // expectations in the tests above.
+        pub(crate) env: HashMap<String, String>,
+        pub(crate) _inner_command: Command,
+    }
+
+    /// Test double for the Docker client: records every exec call and can be
+    /// switched into podman mode to exercise the non-BuildKit code paths.
+    pub(crate) struct FakeDocker {
+        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
+        podman: bool,
+    }
+
+    impl FakeDocker {
+        /// Creates a fake that reports itself as Docker (not podman) and has
+        /// recorded no exec commands yet.
+        pub(crate) fn new() -> Self {
+            Self {
+                podman: false,
+                exec_commands_recorded: Mutex::new(Vec::new()),
+            }
+        }
+        // Only compiled on non-Windows, matching the cfg on the podman test
+        // above (its sole caller in this module).
+        #[cfg(not(target_os = "windows"))]
+        fn set_podman(&mut self, podman: bool) {
+            self.podman = podman;
+        }
+    }
+
+    #[async_trait]
+    impl DockerClient for FakeDocker {
+        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
+            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
+                return Ok(DockerInspect {
+                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
+                        .to_string(),
+                    config: DockerInspectConfig {
+                        labels: DockerConfigLabels {
+                            metadata: Some(vec![HashMap::from([(
+                                "remoteUser".to_string(),
+                                Value::String("node".to_string()),
+                            )])]),
+                        },
+                        env: Vec::new(),
+                        image_user: Some("root".to_string()),
+                    },
+                    mounts: None,
+                    state: None,
+                });
+            }
+            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
+                return Ok(DockerInspect {
+                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
+                        .to_string(),
+                    config: DockerInspectConfig {
+                        labels: DockerConfigLabels {
+                            metadata: Some(vec![HashMap::from([(
+                                "remoteUser".to_string(),
+                                Value::String("vscode".to_string()),
+                            )])]),
+                        },
+                        image_user: Some("root".to_string()),
+                        env: Vec::new(),
+                    },
+                    mounts: None,
+                    state: None,
+                });
+            }
+            if id.starts_with("cli_") {
+                return Ok(DockerInspect {
+                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
+                        .to_string(),
+                    config: DockerInspectConfig {
+                        labels: DockerConfigLabels {
+                            metadata: Some(vec![HashMap::from([(
+                                "remoteUser".to_string(),
+                                Value::String("node".to_string()),
+                            )])]),
+                        },
+                        image_user: Some("root".to_string()),
+                        env: vec!["PATH=/initial/path".to_string()],
+                    },
+                    mounts: None,
+                    state: None,
+                });
+            }
+            if id == "found_docker_ps" {
+                return Ok(DockerInspect {
+                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
+                        .to_string(),
+                    config: DockerInspectConfig {
+                        labels: DockerConfigLabels {
+                            metadata: Some(vec![HashMap::from([(
+                                "remoteUser".to_string(),
+                                Value::String("node".to_string()),
+                            )])]),
+                        },
+                        image_user: Some("root".to_string()),
+                        env: vec!["PATH=/initial/path".to_string()],
+                    },
+                    mounts: Some(vec![DockerInspectMount {
+                        source: "/path/to/local/project".to_string(),
+                        destination: "/workspaces/project".to_string(),
+                    }]),
+                    state: None,
+                });
+            }
+            if id.starts_with("rust_a-") {
+                return Ok(DockerInspect {
+                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
+                        .to_string(),
+                    config: DockerInspectConfig {
+                        labels: DockerConfigLabels {
+                            metadata: Some(vec![HashMap::from([(
+                                "remoteUser".to_string(),
+                                Value::String("vscode".to_string()),
+                            )])]),
+                        },
+                        image_user: Some("root".to_string()),
+                        env: Vec::new(),
+                    },
+                    mounts: None,
+                    state: None,
+                });
+            }
+
+            Err(DevContainerError::DockerNotAvailable)
+        }
+        /// Mock: returns a canned two-service compose config (`app` + `db`)
+        /// when queried with exactly the expected compose file path; any other
+        /// input is treated as "docker unavailable".
+        async fn get_docker_compose_config(
+            &self,
+            config_files: &Vec<PathBuf>,
+        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
+            if config_files.len() == 1
+                && config_files.get(0)
+                    == Some(&PathBuf::from(
+                        "/path/to/local/project/.devcontainer/docker-compose.yml",
+                    ))
+            {
+                // Fixture mirrors a typical app+db devcontainer compose file:
+                // a locally-built app sharing the db's network namespace, and
+                // a postgres service with a named data volume.
+                return Ok(Some(DockerComposeConfig {
+                    name: None,
+                    services: HashMap::from([
+                        (
+                            "app".to_string(),
+                            DockerComposeService {
+                                build: Some(DockerComposeServiceBuild {
+                                    context: Some(".".to_string()),
+                                    dockerfile: Some("Dockerfile".to_string()),
+                                    args: None,
+                                    additional_contexts: None,
+                                }),
+                                volumes: vec![MountDefinition {
+                                    source: "../..".to_string(),
+                                    target: "/workspaces".to_string(),
+                                    mount_type: Some("bind".to_string()),
+                                }],
+                                network_mode: Some("service:db".to_string()),
+                                ..Default::default()
+                            },
+                        ),
+                        (
+                            "db".to_string(),
+                            DockerComposeService {
+                                image: Some("postgres:14.1".to_string()),
+                                volumes: vec![MountDefinition {
+                                    source: "postgres-data".to_string(),
+                                    target: "/var/lib/postgresql/data".to_string(),
+                                    mount_type: Some("volume".to_string()),
+                                }],
+                                env_file: Some(vec![".env".to_string()]),
+                                ..Default::default()
+                            },
+                        ),
+                    ]),
+                    volumes: HashMap::from([(
+                        "postgres-data".to_string(),
+                        DockerComposeVolume::default(),
+                    )]),
+                }));
+            }
+            // Unexpected inputs fail the same way a missing docker CLI would.
+            Err(DevContainerError::DockerNotAvailable)
+        }
+        /// Mock: pretend `docker compose build` always succeeds; nothing is run.
+        async fn docker_compose_build(
+            &self,
+            _config_files: &Vec<PathBuf>,
+            _project_name: &str,
+        ) -> Result<(), DevContainerError> {
+            Ok(())
+        }
+        /// Mock: records every exec invocation (container, cwd, user, env and
+        /// the wrapped command) so tests can assert on what would have run,
+        /// then reports success without executing anything.
+        async fn run_docker_exec(
+            &self,
+            container_id: &str,
+            remote_folder: &str,
+            user: &str,
+            env: &HashMap<String, String>,
+            inner_command: Command,
+        ) -> Result<(), DevContainerError> {
+            let mut record = self
+                .exec_commands_recorded
+                .lock()
+                .expect("should be available");
+            record.push(RecordedExecCommand {
+                _container_id: container_id.to_string(),
+                _remote_folder: remote_folder.to_string(),
+                _user: user.to_string(),
+                env: env.clone(),
+                _inner_command: inner_command,
+            });
+            Ok(())
+        }
+        /// Mock: starting containers is unsupported in this fake.
+        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
+            Err(DevContainerError::DockerNotAvailable)
+        }
+        /// Mock: every filter query "finds" the same canned container id.
+        async fn find_process_by_filters(
+            &self,
+            _filters: Vec<String>,
+        ) -> Result<Option<DockerPs>, DevContainerError> {
+            Ok(Some(DockerPs {
+                id: "found_docker_ps".to_string(),
+            }))
+        }
+        /// Mirrors the real client: BuildKit is assumed unavailable under podman.
+        fn supports_compose_buildkit(&self) -> bool {
+            !self.podman
+        }
+        /// Returns the CLI name matching the fake's configured engine.
+        fn docker_cli(&self) -> String {
+            if self.podman {
+                "podman".to_string()
+            } else {
+                "docker".to_string()
+            }
+        }
+    }
+
+    /// A recorded invocation captured by [`TestCommandRunner`]: the program
+    /// name plus its argument list.
+    #[derive(Debug, Clone)]
+    pub(crate) struct TestCommand {
+        pub(crate) program: String,
+        pub(crate) args: Vec<String>,
+    }
+
+    /// Command-runner test double that records every command instead of
+    /// executing it.
+    pub(crate) struct TestCommandRunner {
+        commands_recorded: Mutex<Vec<TestCommand>>,
+    }
+
+    impl TestCommandRunner {
+        /// Creates a runner with an empty command log.
+        fn new() -> Self {
+            Self {
+                commands_recorded: Mutex::new(Vec::new()),
+            }
+        }
+
+        /// Returns copies of every recorded command whose program matches
+        /// `program`.
+        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
+            let record = self.commands_recorded.lock().expect("poisoned");
+            record
+                .iter()
+                .filter(|r| r.program == program)
+                // `.cloned()` instead of `.map(|r| r.clone())` (clippy map_clone).
+                .cloned()
+                .collect()
+        }
+    }
+
+    #[async_trait]
+    impl CommandRunner for TestCommandRunner {
+        /// Records the command's program and arguments, then reports success
+        /// with empty stdout/stderr. `ExitStatus::default()` is the zero
+        /// status — NOTE(review): confirm this reads as success on all platforms.
+        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
+            let mut record = self.commands_recorded.lock().expect("poisoned");
+
+            record.push(TestCommand {
+                program: command.get_program().display().to_string(),
+                args: command
+                    .get_args()
+                    .map(|a| a.display().to_string())
+                    .collect(),
+            });
+
+            Ok(Output {
+                status: ExitStatus::default(),
+                stdout: vec![],
+                stderr: vec![],
+            })
+        }
+    }
+
+    fn fake_http_client() -> Arc<dyn HttpClient> {
+        FakeHttpClient::create(|request| async move {
+            let (parts, _body) = request.into_parts();
+            if parts.uri.path() == "/token" {
+                let token_response = TokenResponse {
+                    token: "token".to_string(),
+                };
+                return Ok(http::Response::builder()
+                    .status(200)
+                    .body(http_client::AsyncBody::from(
+                        serde_json_lenient::to_string(&token_response).unwrap(),
+                    ))
+                    .unwrap());
+            }
+
+            // OCI specific things
+            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
+                let response = r#"
+                    {
+                        "schemaVersion": 2,
+                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
+                        "config": {
+                            "mediaType": "application/vnd.devcontainers",
+                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
+                            "size": 2
+                        },
+                        "layers": [
+                            {
+                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
+                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
+                                "size": 59392,
+                                "annotations": {
+                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
+                                }
+                            }
+                        ],
+                        "annotations": {

crates/dev_container/src/docker.rs 🔗

@@ -0,0 +1,898 @@
+use std::{collections::HashMap, path::PathBuf};
+
+use async_trait::async_trait;
+use serde::{Deserialize, Deserializer, Serialize};
+use util::command::Command;
+
+use crate::{
+    command_json::evaluate_json_command, devcontainer_api::DevContainerError,
+    devcontainer_json::MountDefinition,
+};
+
+/// One line of `docker ps --format '{{json .}}'` output; only the container
+/// id is of interest.
+#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(rename_all = "PascalCase")]
+pub(crate) struct DockerPs {
+    // `docker ps` emits `ID` (all caps), which PascalCase renaming alone would miss.
+    #[serde(alias = "ID")]
+    pub(crate) id: String,
+}
+
+/// The `State` object of a `docker inspect` payload.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(rename_all = "PascalCase")]
+pub(crate) struct DockerState {
+    pub(crate) running: bool,
+}
+
+/// Subset of a `docker inspect` payload used by the devcontainer flow.
+/// `mounts` and `state` are optional — not every inspect target reports them.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(rename_all = "PascalCase")]
+pub(crate) struct DockerInspect {
+    pub(crate) id: String,
+    pub(crate) config: DockerInspectConfig,
+    pub(crate) mounts: Option<Vec<DockerInspectMount>>,
+    pub(crate) state: Option<DockerState>,
+}
+
+/// The `Config.Labels` object of a `docker inspect` payload.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
+pub(crate) struct DockerConfigLabels {
+    // Docker stores the `devcontainer.metadata` label value as a JSON
+    // *string*; the custom deserializer re-parses it into structured maps.
+    #[serde(
+        rename = "devcontainer.metadata",
+        deserialize_with = "deserialize_metadata"
+    )]
+    pub(crate) metadata: Option<Vec<HashMap<String, serde_json_lenient::Value>>>,
+}
+
+/// The `Config` object of a `docker inspect` payload.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(rename_all = "PascalCase")]
+pub(crate) struct DockerInspectConfig {
+    pub(crate) labels: DockerConfigLabels,
+    /// The image-configured user (inspect's `User` field).
+    #[serde(rename = "User")]
+    pub(crate) image_user: Option<String>,
+    /// Environment entries in `KEY=VALUE` form, exactly as docker reports them.
+    #[serde(default)]
+    pub(crate) env: Vec<String>,
+}
+
+impl DockerInspectConfig {
+    /// Converts the container's `Env` entries (`KEY=VALUE` strings) into a map.
+    ///
+    /// Splits on the *first* `=` only, so values that themselves contain `=`
+    /// (common in `PATH`-like or base64-encoded variables) are preserved
+    /// intact. Entries without any `=` fail with `DevContainerParseFailed`.
+    pub(crate) fn env_as_map(&self) -> Result<HashMap<String, String>, DevContainerError> {
+        let mut map = HashMap::new();
+        for env_var in &self.env {
+            let Some((key, value)) = env_var.split_once('=') else {
+                log::error!("Unable to parse {env_var} into an environment key-value");
+                return Err(DevContainerError::DevContainerParseFailed);
+            };
+            map.insert(key.to_string(), value.to_string());
+        }
+        Ok(map)
+    }
+}
+
+/// One entry of a container's `Mounts` array from `docker inspect`.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)]
+#[serde(rename_all = "PascalCase")]
+pub(crate) struct DockerInspectMount {
+    pub(crate) source: String,
+    pub(crate) destination: String,
+}
+
+/// The `build` section of a compose service, as emitted by
+/// `docker compose config --format json`.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq, Default)]
+pub(crate) struct DockerComposeServiceBuild {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) context: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) dockerfile: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) args: Option<HashMap<String, String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) additional_contexts: Option<HashMap<String, String>>,
+}
+
+/// A single service from a resolved compose configuration.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq, Default)]
+pub(crate) struct DockerComposeService {
+    // NOTE(review): unlike the other optional fields, `image` has no
+    // `skip_serializing_if`, so it serializes as `null` when absent — confirm
+    // downstream consumers tolerate that.
+    pub(crate) image: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) entrypoint: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) cap_add: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) security_opt: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) labels: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) build: Option<DockerComposeServiceBuild>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) privileged: Option<bool>,
+    pub(crate) volumes: Vec<MountDefinition>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) env_file: Option<Vec<String>>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub(crate) ports: Vec<String>,
+    /// e.g. `service:db` to share another service's network namespace.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) network_mode: Option<String>,
+}
+
+/// A named volume from a resolved compose configuration.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq, Default)]
+pub(crate) struct DockerComposeVolume {
+    pub(crate) name: String,
+}
+
+/// The fully-resolved output of `docker compose config --format json`.
+#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq, Default)]
+pub(crate) struct DockerComposeConfig {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub(crate) name: Option<String>,
+    pub(crate) services: HashMap<String, DockerComposeService>,
+    pub(crate) volumes: HashMap<String, DockerComposeVolume>,
+}
+
+/// Thin wrapper around the configured container engine CLI binary
+/// (`docker` or `podman`).
+pub(crate) struct Docker {
+    docker_cli: String,
+}
+
+impl DockerInspect {
+    /// Whether inspect reported the container as currently running.
+    /// A missing `State` (e.g. when inspecting an image) counts as not running.
+    pub(crate) fn is_running(&self) -> bool {
+        self.state.as_ref().map_or(false, |s| s.running)
+    }
+}
+
+impl Docker {
+    /// Creates a client that shells out to the given CLI binary
+    /// (`docker` or `podman`).
+    pub(crate) fn new(docker_cli: &str) -> Self {
+        Self {
+            docker_cli: docker_cli.to_string(),
+        }
+    }
+
+    fn is_podman(&self) -> bool {
+        self.docker_cli == "podman"
+    }
+
+    /// Pulls `image` from its registry, mapping any failure to
+    /// `ResourceFetchFailed`.
+    async fn pull_image(&self, image: &str) -> Result<(), DevContainerError> {
+        let mut command = Command::new(&self.docker_cli);
+        command.args(&["pull", image]);
+
+        let output = command.output().await.map_err(|e| {
+            log::error!("Error pulling image: {e}");
+            DevContainerError::ResourceFetchFailed
+        })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("Non-success result from docker pull: {stderr}");
+            return Err(DevContainerError::ResourceFetchFailed);
+        }
+        Ok(())
+    }
+
+    /// Builds `docker ps -a` with one `--filter` per entry, formatted as one
+    /// JSON object per matching container.
+    fn create_docker_query_containers(&self, filters: Vec<String>) -> Command {
+        let mut command = Command::new(&self.docker_cli);
+        command.args(&["ps", "-a"]);
+
+        for filter in filters {
+            command.arg("--filter");
+            command.arg(filter);
+        }
+        command.arg("--format={{ json . }}");
+        command
+    }
+
+    /// Builds `docker inspect` for an image or container id, formatted as JSON.
+    fn create_docker_inspect(&self, id: &str) -> Command {
+        let mut command = Command::new(&self.docker_cli);
+        command.args(&["inspect", "--format={{json . }}", id]);
+        command
+    }
+
+    /// Builds `docker compose -f … config --format json` to resolve the
+    /// effective compose configuration across the given files.
+    fn create_docker_compose_config_command(&self, config_files: &[PathBuf]) -> Command {
+        let mut command = Command::new(&self.docker_cli);
+        command.arg("compose");
+        for file_path in config_files {
+            command.args(&["-f", &file_path.display().to_string()]);
+        }
+        command.args(&["config", "--format", "json"]);
+        command
+    }
+}
+
+#[async_trait]
+impl DockerClient for Docker {
+    async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
+        // Try to pull the image, but continue on failure; the image may be
+        // local-only, or `id` may reference a running container rather than
+        // an image.
+        self.pull_image(id).await.ok();
+
+        let command = self.create_docker_inspect(id);
+
+        let Some(docker_inspect): Option<DockerInspect> = evaluate_json_command(command).await?
+        else {
+            log::error!("Docker inspect produced no deserializable output");
+            return Err(DevContainerError::CommandFailed(self.docker_cli.clone()));
+        };
+        Ok(docker_inspect)
+    }
+
+    async fn get_docker_compose_config(
+        &self,
+        config_files: &Vec<PathBuf>,
+    ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
+        let command = self.create_docker_compose_config_command(config_files);
+        evaluate_json_command(command).await
+    }
+
+    async fn docker_compose_build(
+        &self,
+        config_files: &Vec<PathBuf>,
+        project_name: &str,
+    ) -> Result<(), DevContainerError> {
+        let mut command = Command::new(&self.docker_cli);
+        // BuildKit is only enabled for docker; see `supports_compose_buildkit`.
+        if !self.is_podman() {
+            command.env("DOCKER_BUILDKIT", "1");
+        }
+        command.args(&["compose", "--project-name", project_name]);
+        for docker_compose_file in config_files {
+            command.args(&["-f", &docker_compose_file.display().to_string()]);
+        }
+        command.arg("build");
+
+        let output = command.output().await.map_err(|e| {
+            log::error!("Error running docker compose build: {e}");
+            DevContainerError::CommandFailed(command.get_program().display().to_string())
+        })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("Non-success status from docker compose build: {}", stderr);
+            return Err(DevContainerError::CommandFailed(
+                command.get_program().display().to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+    async fn run_docker_exec(
+        &self,
+        container_id: &str,
+        remote_folder: &str,
+        user: &str,
+        env: &HashMap<String, String>,
+        inner_command: Command,
+    ) -> Result<(), DevContainerError> {
+        let mut command = Command::new(&self.docker_cli);
+
+        command.args(&["exec", "-w", remote_folder, "-u", user]);
+
+        for (k, v) in env.iter() {
+            command.arg("-e");
+            let env_declaration = format!("{}={}", k, v);
+            command.arg(&env_declaration);
+        }
+
+        command.arg(container_id);
+
+        command.arg("sh");
+
+        // Flatten the inner command into a single `sh -c` string.
+        // NOTE(review): args are joined with spaces without shell quoting, so
+        // arguments containing spaces or metacharacters are re-tokenized
+        // inside the container — confirm callers only pass safe arguments.
+        let mut inner_program_script: Vec<String> =
+            vec![inner_command.get_program().display().to_string()];
+        let mut args: Vec<String> = inner_command
+            .get_args()
+            .map(|arg| arg.display().to_string())
+            .collect();
+        inner_program_script.append(&mut args);
+        command.args(&["-c", &inner_program_script.join(" ")]);
+
+        let output = command.output().await.map_err(|e| {
+            log::error!("Error running command {e} in container exec");
+            DevContainerError::ContainerNotValid(container_id.to_string())
+        })?;
+        // NOTE(review): a non-zero exit from the exec'd command is logged but
+        // still returns Ok — confirm best-effort behavior is intended.
+        if !output.status.success() {
+            let std_err = String::from_utf8_lossy(&output.stderr);
+            log::error!("Command produced a non-successful output. StdErr: {std_err}");
+        }
+        let std_out = String::from_utf8_lossy(&output.stdout);
+        log::debug!("Command output:\n {std_out}");
+
+        Ok(())
+    }
+    async fn start_container(&self, id: &str) -> Result<(), DevContainerError> {
+        let mut command = Command::new(&self.docker_cli);
+
+        command.args(&["start", id]);
+
+        let output = command.output().await.map_err(|e| {
+            log::error!("Error running docker start: {e}");
+            DevContainerError::CommandFailed(command.get_program().display().to_string())
+        })?;
+
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            log::error!("Non-success status from docker start: {stderr}");
+            return Err(DevContainerError::CommandFailed(
+                command.get_program().display().to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+
+    async fn find_process_by_filters(
+        &self,
+        filters: Vec<String>,
+    ) -> Result<Option<DockerPs>, DevContainerError> {
+        let command = self.create_docker_query_containers(filters);
+        evaluate_json_command(command).await
+    }
+
+    fn docker_cli(&self) -> String {
+        self.docker_cli.clone()
+    }
+
+    fn supports_compose_buildkit(&self) -> bool {
+        !self.is_podman()
+    }
+}
+
+/// Abstraction over the container engine CLI so tests can substitute a fake.
+#[async_trait]
+pub(crate) trait DockerClient {
+    /// Inspects an image or container by id/reference, returning its parsed
+    /// `docker inspect` payload.
+    async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError>;
+    /// Resolves the given compose files into one effective configuration.
+    async fn get_docker_compose_config(
+        &self,
+        config_files: &Vec<PathBuf>,
+    ) -> Result<Option<DockerComposeConfig>, DevContainerError>;
+    /// Runs `compose build` for the given project over the given files.
+    async fn docker_compose_build(
+        &self,
+        config_files: &Vec<PathBuf>,
+        project_name: &str,
+    ) -> Result<(), DevContainerError>;
+    /// Executes `inner_command` inside a running container via `exec`, as
+    /// `user`, with `remote_folder` as the working directory.
+    async fn run_docker_exec(
+        &self,
+        container_id: &str,
+        remote_folder: &str,
+        user: &str,
+        env: &HashMap<String, String>,
+        inner_command: Command,
+    ) -> Result<(), DevContainerError>;
+    /// Starts a stopped container by id.
+    async fn start_container(&self, id: &str) -> Result<(), DevContainerError>;
+    /// Finds a container matching all of the given `--filter` expressions.
+    async fn find_process_by_filters(
+        &self,
+        filters: Vec<String>,
+    ) -> Result<Option<DockerPs>, DevContainerError>;
+    /// Whether BuildKit may be enabled for compose builds.
+    fn supports_compose_buildkit(&self) -> bool;
+    /// This operates as an escape hatch for more custom uses of the docker API.
+    /// See DevContainerManifest::create_docker_build as an example
+    fn docker_cli(&self) -> String;
+}
+
+/// Deserializes the `devcontainer.metadata` label, which docker reports as a
+/// JSON-encoded *string*, into the parsed list of metadata maps.
+fn deserialize_metadata<'de, D>(
+    deserializer: D,
+) -> Result<Option<Vec<HashMap<String, serde_json_lenient::Value>>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let raw: Option<String> = Option::deserialize(deserializer)?;
+    raw.map(|json_string| {
+        serde_json_lenient::from_str::<Vec<HashMap<String, serde_json_lenient::Value>>>(
+            &json_string,
+        )
+        .map_err(|e| {
+            log::error!("Error deserializing metadata: {e}");
+            serde::de::Error::custom(e)
+        })
+    })
+    .transpose()
+}
+
+/// Derives the project's path *inside* the container from the container's
+/// mount table.
+///
+/// Docker on some hosts prefixes bind-mount sources with `/host_mnt` for
+/// filesystem isolation; that prefix is stripped before comparing paths.
+///
+/// Returns `ContainerNotValid` when the container reports no mounts, or no
+/// mount covers `local_dir`.
+pub(crate) fn get_remote_dir_from_config(
+    config: &DockerInspect,
+    local_dir: String,
+) -> Result<String, DevContainerError> {
+    let local_path = PathBuf::from(&local_dir);
+
+    let Some(mounts) = &config.mounts else {
+        log::error!("No mounts defined for container");
+        return Err(DevContainerError::ContainerNotValid(config.id.clone()));
+    };
+
+    for mount in mounts {
+        // Sometimes docker will mount the local filesystem on host_mnt for system isolation
+        let mount_source = PathBuf::from(mount.source.trim_start_matches("/host_mnt"));
+        if let Ok(relative_path_to_project) = local_path.strip_prefix(&mount_source) {
+            // An exact match leaves an empty relative path; return the mount
+            // destination as-is instead of appending a trailing slash.
+            if relative_path_to_project.as_os_str().is_empty() {
+                return Ok(mount.destination.clone());
+            }
+            let remote_dir = format!(
+                "{}/{}",
+                &mount.destination,
+                relative_path_to_project.display()
+            );
+            return Ok(remote_dir);
+        }
+    }
+    log::error!("No mounts to local folder");
+    Err(DevContainerError::ContainerNotValid(config.id.clone()))
+}
+
+#[cfg(test)]
+mod test {
+    use std::{
+        collections::HashMap,
+        ffi::OsStr,
+        process::{ExitStatus, Output},
+    };
+
+    use crate::{
+        command_json::deserialize_json_output,
+        devcontainer_json::MountDefinition,
+        docker::{
+            Docker, DockerComposeConfig, DockerComposeService, DockerComposeVolume, DockerInspect,
+            DockerPs, get_remote_dir_from_config,
+        },
+    };
+
+    /// `create_docker_inspect` should build
+    /// `docker inspect --format={{json . }} <id>` verbatim.
+    #[test]
+    fn should_create_docker_inspect_command() {
+        let docker = Docker::new("docker");
+        let given_id = "given_docker_id";
+
+        let command = docker.create_docker_inspect(given_id);
+
+        assert_eq!(
+            command.get_args().collect::<Vec<&OsStr>>(),
+            vec![
+                OsStr::new("inspect"),
+                OsStr::new("--format={{json . }}"),
+                OsStr::new(given_id)
+            ]
+        )
+    }
+
+    #[test]
+    fn should_deserialize_docker_ps_with_filters() {
+        // First, deserializes empty
+        let empty_output = Output {
+            status: ExitStatus::default(),
+            stderr: vec![],
+            stdout: String::from("").into_bytes(),
+        };
+
+        let result: Option<DockerPs> = deserialize_json_output(empty_output).unwrap();
+
+        assert!(result.is_none());
+
+        let full_output = Output {
+                status: ExitStatus::default(),
+                stderr: vec![],
+                stdout: String::from(r#"
+    {
+        "Command": "\"/bin/sh -c 'echo Co…\"",
+        "CreatedAt": "2026-02-04 15:44:21 -0800 PST",
+        "ID": "abdb6ab59573",
+        "Image": "mcr.microsoft.com/devcontainers/base:ubuntu",

crates/dev_container/src/features.rs 🔗

@@ -0,0 +1,254 @@
+use std::{collections::HashMap, path::PathBuf, sync::Arc};
+
+use fs::Fs;
+use serde::Deserialize;
+use serde_json_lenient::Value;
+
+use crate::{
+    devcontainer_api::DevContainerError,
+    devcontainer_json::{FeatureOptions, MountDefinition},
+    safe_id_upper,
+};
+
+/// Parsed components of an OCI feature reference such as
+/// `ghcr.io/devcontainers/features/aws-cli:1`.
+///
+/// Mirrors the CLI's `OCIRef` in `containerCollectionsOCI.ts`.
+#[derive(Debug, Clone)]
+pub(crate) struct OciFeatureRef {
+    /// Registry hostname, e.g. `ghcr.io`
+    pub registry: String,
+    /// Full repository path within the registry, e.g. `devcontainers/features/aws-cli`
+    pub path: String,
+    /// Version tag, digest, or `latest`
+    /// (digests retain their algorithm prefix, e.g. `sha256:…`).
+    pub version: String,
+}
+
+/// Minimal representation of a `devcontainer-feature.json` file, used to
+/// extract option default values after the feature tarball is downloaded.
+///
+/// See: https://containers.dev/implementors/features/#devcontainer-featurejson-properties
+#[derive(Debug, Deserialize, Eq, PartialEq, Default)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct DevContainerFeatureJson {
+    // NOTE(review): deserialized but unused (leading underscore) —
+    // presumably kept for debugging; confirm before removing.
+    #[serde(rename = "id")]
+    pub(crate) _id: Option<String>,
+    /// Option name → definition; only each option's `default` is read.
+    #[serde(default)]
+    pub(crate) options: HashMap<String, FeatureOptionDefinition>,
+    pub(crate) mounts: Option<Vec<MountDefinition>>,
+    pub(crate) privileged: Option<bool>,
+    pub(crate) entrypoint: Option<String>,
+    /// Maps from the JSON key `containerEnv` via `rename_all = "camelCase"`.
+    pub(crate) container_env: Option<HashMap<String, String>>,
+}
+
+/// A single option definition inside `devcontainer-feature.json`.
+/// We only need the `default` field to populate env variables.
+#[derive(Debug, Deserialize, Eq, PartialEq)]
+pub(crate) struct FeatureOptionDefinition {
+    /// The option's default value; may be a string, bool, or number.
+    pub(crate) default: Option<Value>,
+}
+
+impl FeatureOptionDefinition {
+    /// Renders the default value as a plain string for an env file: strings
+    /// are emitted without surrounding JSON quotes, everything else via its
+    /// usual string rendering.
+    fn serialize_default(&self) -> Option<String> {
+        let value = self.default.as_ref()?;
+        Some(match value {
+            Value::String(s) => s.clone(),
+            Value::Bool(b) => b.to_string(),
+            Value::Number(n) => n.to_string(),
+            other => other.to_string(),
+        })
+    }
+}
+
+/// A downloaded feature plus the data needed to install it: the consecutive
+/// id used to namespace generated Dockerfile layers, the directory the
+/// feature was unpacked into, and its parsed `devcontainer-feature.json`.
+#[derive(Debug, Eq, PartialEq, Default)]
+pub(crate) struct FeatureManifest {
+    consecutive_id: String,
+    file_path: PathBuf,
+    feature_json: DevContainerFeatureJson,
+}
+
+impl FeatureManifest {
+    pub(crate) fn new(
+        consecutive_id: String,
+        file_path: PathBuf,
+        feature_json: DevContainerFeatureJson,
+    ) -> Self {
+        Self {
+            consecutive_id,
+            file_path,
+            feature_json,
+        }
+    }
+
+    /// Environment variables the feature asks to be set in the container.
+    pub(crate) fn container_env(&self) -> HashMap<String, String> {
+        self.feature_json.container_env.clone().unwrap_or_default()
+    }
+
+    /// Generates the Dockerfile fragment that copies this feature into the
+    /// image and runs its `devcontainer-features-install.sh`.
+    ///
+    /// With BuildKit, a bind mount avoids baking the feature sources into a
+    /// layer; the non-BuildKit path falls back to a plain `COPY`.
+    pub(crate) fn generate_dockerfile_feature_layer(
+        &self,
+        use_buildkit: bool,
+        dest: &str,
+    ) -> String {
+        let id = &self.consecutive_id;
+        if use_buildkit {
+            format!(
+                r#"
+RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./{id},target=/tmp/build-features-src/{id} \
+cp -ar /tmp/build-features-src/{id} {dest} \
+&& chmod -R 0755 {dest}/{id} \
+&& cd {dest}/{id} \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh \
+&& rm -rf {dest}/{id}
+"#,
+            )
+        } else {
+            let source = format!("/tmp/build-features/{id}");
+            let full_dest = format!("{dest}/{id}");
+            format!(
+                r#"
+COPY --chown=root:root --from=dev_containers_feature_content_source {source} {full_dest}
+RUN chmod -R 0755 {full_dest} \
+&& cd {full_dest} \
+&& chmod +x ./devcontainer-features-install.sh \
+&& ./devcontainer-features-install.sh
+"#
+            )
+        }
+    }
+
+    /// Renders the feature's `containerEnv` entries as Dockerfile `ENV`
+    /// lines, sorted by key so the output is deterministic.
+    pub(crate) fn generate_dockerfile_env(&self) -> String {
+        let env = self.container_env();
+        let mut env: Vec<(&String, &String)> = env.iter().collect();
+        env.sort();
+
+        // Append in place; re-allocating the accumulator per entry is O(n²).
+        let mut layer = String::new();
+        for (key, value) in env {
+            layer.push_str(&format!("ENV {key}={value}\n"));
+        }
+        layer
+    }
+
+    /// Merges user options from devcontainer.json with default options defined in this feature manifest
+    pub(crate) fn generate_merged_env(&self, options: &FeatureOptions) -> HashMap<String, String> {
+        // Start from the feature's declared defaults, keyed by sanitized
+        // upper-case env names; user-supplied options override them below.
+        let mut merged: HashMap<String, String> = self
+            .feature_json
+            .options
+            .iter()
+            .filter_map(|(k, v)| {
+                v.serialize_default()
+                    .map(|v_some| (safe_id_upper(k), v_some))
+            })
+            .collect();
+
+        match options {
+            // A bare boolean (e.g. `"feature": true`) carries no option values.
+            FeatureOptions::Bool(_) => {}
+            // A bare string is shorthand for the VERSION option.
+            FeatureOptions::String(version) => {
+                merged.insert("VERSION".to_string(), version.clone());
+            }
+            FeatureOptions::Options(map) => {
+                for (key, value) in map {
+                    // NOTE(review): if `value` is a JSON value, `to_string()`
+                    // quotes strings — confirm values render unquoted here.
+                    merged.insert(safe_id_upper(key), value.to_string());
+                }
+            }
+        }
+        merged
+    }
+
+    /// Writes `devcontainer-features.env` (sorted `KEY=value` lines) into the
+    /// unpacked feature directory and returns the file's content.
+    pub(crate) async fn write_feature_env(
+        &self,
+        fs: &Arc<dyn Fs>,
+        options: &FeatureOptions,
+    ) -> Result<String, DevContainerError> {
+        let merged_env = self.generate_merged_env(options);
+
+        let mut env_vars: Vec<(&String, &String)> = merged_env.iter().collect();
+        env_vars.sort();
+
+        // Build in place; re-allocating the accumulator per entry is O(n²).
+        let mut env_file_content = String::new();
+        for (k, v) in &env_vars {
+            env_file_content.push_str(&format!("{k}={v}\n"));
+        }
+
+        fs.write(
+            &self.file_path.join("devcontainer-features.env"),
+            env_file_content.as_bytes(),
+        )
+        .await
+        .map_err(|e| {
+            log::error!("error writing devcontainer feature environment: {e}");
+            DevContainerError::FilesystemError
+        })?;
+
+        Ok(env_file_content)
+    }
+
+    /// Extra mounts requested by the feature (empty when none are declared).
+    pub(crate) fn mounts(&self) -> Vec<MountDefinition> {
+        self.feature_json.mounts.clone().unwrap_or_default()
+    }
+
+    /// Whether the feature requires a privileged container.
+    pub(crate) fn privileged(&self) -> bool {
+        self.feature_json.privileged.unwrap_or(false)
+    }
+
+    /// The entrypoint the feature asks to run, if any.
+    pub(crate) fn entrypoint(&self) -> Option<String> {
+        self.feature_json.entrypoint.clone()
+    }
+
+    /// Directory this feature was unpacked into.
+    pub(crate) fn file_path(&self) -> PathBuf {
+        self.file_path.clone()
+    }
+}
+
+/// Parses an OCI feature reference string into its components.
+///
+/// Handles formats like:
+/// - `ghcr.io/devcontainers/features/aws-cli:1`
+/// - `ghcr.io/user/repo/go`  (implicitly `:latest`)
+/// - `ghcr.io/devcontainers/features/rust@sha256:abc123`
+///
+/// Returns `None` for local paths (`./…`) and direct tarball URIs (`https://…`).
+pub(crate) fn parse_oci_feature_ref(input: &str) -> Option<OciFeatureRef> {
+    // Local paths and direct tarball URLs are not OCI references.
+    let is_non_oci = input.starts_with('.')
+        || input.starts_with('/')
+        || input.starts_with("https://")
+        || input.starts_with("http://");
+    if is_non_oci {
+        return None;
+    }
+
+    // OCI references are matched case-insensitively; normalize once up front.
+    let normalized = input.to_lowercase();
+
+    // Split the version off the resource: an `@digest` wins over a `:tag`,
+    // and a colon only counts as a tag separator when it appears after the
+    // last slash.
+    let (resource, version) = if let Some(at_idx) = normalized.rfind('@') {
+        (
+            normalized[..at_idx].to_string(),
+            normalized[at_idx + 1..].to_string(),
+        )
+    } else {
+        match (normalized.rfind('/'), normalized.rfind(':')) {
+            (Some(slash), Some(colon)) if colon > slash => (
+                normalized[..colon].to_string(),
+                normalized[colon + 1..].to_string(),
+            ),
+            _ => (normalized, "latest".to_string()),
+        }
+    };
+
+    // Expect `<registry>/<namespace>/<name>` — at least three segments.
+    let mut segments = resource.split('/');
+    let registry = segments.next()?.to_string();
+    let remainder: Vec<&str> = segments.collect();
+    if remainder.len() < 2 {
+        return None;
+    }
+
+    Some(OciFeatureRef {
+        registry,
+        path: remainder.join("/"),
+        version,
+    })
+}

crates/dev_container/src/lib.rs 🔗

@@ -1,11 +1,14 @@
 use std::path::Path;
 
+use fs::Fs;
 use gpui::AppContext;
 use gpui::Entity;
 use gpui::Task;
+use gpui::WeakEntity;
 use http_client::anyhow;
 use picker::Picker;
 use picker::PickerDelegate;
+use project::ProjectEnvironment;
 use settings::RegisterSetting;
 use settings::Settings;
 use std::collections::HashMap;
@@ -25,8 +28,9 @@ use ui::Tooltip;
 use ui::h_flex;
 use ui::rems_from_px;
 use ui::v_flex;
+use util::shell::Shell;
 
-use gpui::{Action, DismissEvent, EventEmitter, FocusHandle, Focusable, RenderOnce, WeakEntity};
+use gpui::{Action, DismissEvent, EventEmitter, FocusHandle, Focusable, RenderOnce};
 use serde::Deserialize;
 use ui::{
     AnyElement, App, Color, CommonAnimationExt, Context, Headline, HeadlineSize, Icon, IconName,
@@ -37,40 +41,94 @@ use util::ResultExt;
 use util::rel_path::RelPath;
 use workspace::{ModalView, Workspace, with_active_or_new_workspace};
 
-use futures::AsyncReadExt;
-use http::Request;
-use http_client::{AsyncBody, HttpClient};
+use http_client::HttpClient;
 
+mod command_json;
 mod devcontainer_api;
+mod devcontainer_json;
+mod devcontainer_manifest;
+mod docker;
+mod features;
+mod oci;
 
-use devcontainer_api::ensure_devcontainer_cli;
-use devcontainer_api::read_devcontainer_configuration;
+use devcontainer_api::read_default_devcontainer_configuration;
 
 use crate::devcontainer_api::DevContainerError;
-use crate::devcontainer_api::apply_dev_container_template;
+use crate::devcontainer_api::apply_devcontainer_template;
+use crate::oci::get_deserializable_oci_blob;
+use crate::oci::get_latest_oci_manifest;
+use crate::oci::get_oci_token;
 
 pub use devcontainer_api::{
     DevContainerConfig, find_configs_in_snapshot, find_devcontainer_configs,
     start_dev_container_with_config,
 };
 
/// Converts a string to a safe environment variable name.
///
/// Mirrors the CLI's `getSafeId` in `containerFeatures.ts`:
/// replaces non-alphanumeric/underscore characters with `_` and replaces a
/// leading sequence of digits/underscores with a single `_`.
/// `safe_id_lower`/`safe_id_upper` additionally lowercase/uppercase the
/// result.
pub(crate) fn safe_id_lower(input: &str) -> String {
    get_safe_id(input).to_lowercase()
}
pub(crate) fn safe_id_upper(input: &str) -> String {
    get_safe_id(input).to_uppercase()
}
fn get_safe_id(input: &str) -> String {
    // The reference CLI uses the regex class `\w`, which is ASCII-only.
    // Use `is_ascii_alphanumeric` (not the Unicode-aware `is_alphanumeric`)
    // so non-ASCII letters are also replaced and the result is always a
    // portable environment variable name.
    let replaced: String = input
        .chars()
        .map(|c| {
            if c.is_ascii_alphanumeric() || c == '_' {
                c
            } else {
                '_'
            }
        })
        .collect();
    let without_leading = replaced.trim_start_matches(|c: char| c.is_ascii_digit() || c == '_');
    if without_leading.len() < replaced.len() {
        // Env var names cannot start with a digit; like the CLI, collapse
        // the whole leading digit/underscore run into one `_`.
        format!("_{}", without_leading)
    } else {
        replaced
    }
}
+
/// Everything needed to resolve, build, and launch a dev container for the
/// active project.
pub struct DevContainerContext {
    /// Root directory of the active project on the host.
    pub project_directory: Arc<Path>,
    /// When true, podman is used instead of docker (user setting).
    pub use_podman: bool,
    /// Shared filesystem handle.
    pub fs: Arc<dyn Fs>,
    /// Shared HTTP client (used for registry requests).
    pub http_client: Arc<dyn HttpClient>,
    /// Weak handle to the project's environment entity; resolved lazily by
    /// [`DevContainerContext::environment`].
    pub environment: WeakEntity<ProjectEnvironment>,
}
 
 impl DevContainerContext {
    /// Builds a context from the active workspace.
    ///
    /// Returns `None` when the workspace has no active project directory.
    pub fn from_workspace(workspace: &Workspace, cx: &App) -> Option<Self> {
        let project_directory = workspace.project().read(cx).active_project_directory(cx)?;
        let use_podman = DevContainerSettings::get_global(cx).use_podman;
        let http_client = cx.http_client().clone();
        let fs = workspace.app_state().fs.clone();
        // Downgrade so the context does not keep the project environment alive.
        let environment = workspace.project().read(cx).environment().downgrade();
        Some(Self {
            project_directory,
            use_podman,
            fs,
            http_client,
            environment,
        })
    }
+
    /// Resolves the host shell environment for the project directory.
    ///
    /// Best-effort: returns an empty map when the environment entity has
    /// been dropped or resolution fails, so container setup can proceed
    /// without host variables.
    pub async fn environment(&self, cx: &mut impl AppContext) -> HashMap<String, String> {
        let Ok(task) = self.environment.update(cx, |this, cx| {
            this.local_directory_environment(&Shell::System, self.project_directory.clone(), cx)
        }) else {
            return HashMap::default();
        };
        task.await
            .map(|env| env.into_iter().collect::<std::collections::HashMap<_, _>>())
            .unwrap_or_default()
    }
 }
 
 #[derive(RegisterSetting)]
@@ -1043,7 +1101,7 @@ impl StatefulModal for DevContainerModal {
                     let Ok(client) = cx.update(|_, cx| cx.http_client()) else {
                         return;
                     };
-                    match get_templates(client).await {
+                    match get_ghcr_templates(client).await {
                         Ok(templates) => {
                             let message =
                                 DevContainerMessage::TemplatesRetrieved(templates.templates);
@@ -1209,7 +1267,7 @@ impl StatefulModal for DevContainerModal {
                     let Ok(client) = cx.update(|_, cx| cx.http_client()) else {
                         return;
                     };
-                    let Some(features) = get_features(client).await.log_err() else {
+                    let Some(features) = get_ghcr_features(client).await.log_err() else {
                         return;
                     };
                     let message = DevContainerMessage::FeaturesRetrieved(features.features);
@@ -1328,17 +1386,7 @@ trait StatefulModal: ModalView + EventEmitter<DismissEvent> + Render {
     }
 }
 
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct GithubTokenResponse {
-    token: String,
-}
-
-fn ghcr_url() -> &'static str {
-    "https://ghcr.io"
-}
-
-fn ghcr_domain() -> &'static str {
+fn ghcr_registry() -> &'static str {
     "ghcr.io"
 }
 
@@ -1350,11 +1398,6 @@ fn devcontainer_features_repository() -> &'static str {
     "devcontainers/features"
 }
 
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct ManifestLayer {
-    digest: String,
-}
 #[derive(Debug, Deserialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 struct TemplateOptions {
@@ -1409,12 +1452,6 @@ impl TemplateOptions {
     }
 }
 
-#[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct DockerManifestsResponse {
-    layers: Vec<ManifestLayer>,
-}
-
 #[derive(Debug, Deserialize, Clone, PartialEq, Eq, Hash)]
 #[serde(rename_all = "camelCase")]
 struct DevContainerFeature {
@@ -1480,23 +1517,11 @@ fn dispatch_apply_templates(
             return;
         };
 
-        let Ok(cli) = ensure_devcontainer_cli(&context.node_runtime).await else {
-            this.update_in(cx, |this, window, cx| {
-                this.accept_message(
-                    DevContainerMessage::FailedToWriteTemplate(
-                        DevContainerError::DevContainerCliNotAvailable,
-                    ),
-                    window,
-                    cx,
-                );
-            })
-            .log_err();
-            return;
-        };
+        let environment = context.environment(cx).await;
 
         {
             if check_for_existing
-                && read_devcontainer_configuration(&context, &cli, None)
+                && read_default_devcontainer_configuration(&context, environment)
                     .await
                     .is_ok()
             {
@@ -1511,12 +1536,17 @@ fn dispatch_apply_templates(
                 return;
             }
 
-            let files = match apply_dev_container_template(
+            let worktree = workspace.read_with(cx, |workspace, cx| {
+                workspace.project().read(cx).worktree_for_id(tree_id, cx)
+            });
+
+            let files = match apply_devcontainer_template(
+                worktree.unwrap(),
                 &template_entry.template,
                 &template_entry.options_selected,
                 &template_entry.features_selected,
                 &context,
-                &cli,
+                cx,
             )
             .await
             {
@@ -1524,7 +1554,9 @@ fn dispatch_apply_templates(
                 Err(e) => {
                     this.update_in(cx, |this, window, cx| {
                         this.accept_message(
-                            DevContainerMessage::FailedToWriteTemplate(e),
+                            DevContainerMessage::FailedToWriteTemplate(
+                                DevContainerError::DevContainerTemplateApplyFailed(e.to_string()),
+                            ),
                             window,
                             cx,
                         );
@@ -1534,10 +1566,9 @@ fn dispatch_apply_templates(
                 }
             };
 
-            if files
-                .files
-                .contains(&"./.devcontainer/devcontainer.json".to_string())
-            {
+            if files.project_files.contains(&Arc::from(
+                RelPath::unix(".devcontainer/devcontainer.json").unwrap(),
+            )) {
                 let Some(workspace_task) = workspace
                     .update_in(cx, |workspace, window, cx| {
                         let Ok(path) = RelPath::unix(".devcontainer/devcontainer.json") else {
@@ -1563,250 +1594,90 @@ fn dispatch_apply_templates(
     .detach();
 }
 
-async fn get_templates(
+async fn get_ghcr_templates(
     client: Arc<dyn HttpClient>,
 ) -> Result<DevContainerTemplatesResponse, String> {
-    let token = get_ghcr_token(&client).await?;
-    let manifest = get_latest_manifest(&token.token, &client).await?;
-
-    let mut template_response =
-        get_devcontainer_templates(&token.token, &manifest.layers[0].digest, &client).await?;
+    let token = get_oci_token(
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &client,
+    )
+    .await?;
+    let manifest = get_latest_oci_manifest(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &client,
+        None,
+    )
+    .await?;
+
+    let mut template_response: DevContainerTemplatesResponse = get_deserializable_oci_blob(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &manifest.layers[0].digest,
+        &client,
+    )
+    .await?;
 
     for template in &mut template_response.templates {
         template.source_repository = Some(format!(
             "{}/{}",
-            ghcr_domain(),
+            ghcr_registry(),
             devcontainer_templates_repository()
         ));
     }
     Ok(template_response)
 }
 
-async fn get_features(client: Arc<dyn HttpClient>) -> Result<DevContainerFeaturesResponse, String> {
-    let token = get_ghcr_token(&client).await?;
-    let manifest = get_latest_feature_manifest(&token.token, &client).await?;
+async fn get_ghcr_features(
+    client: Arc<dyn HttpClient>,
+) -> Result<DevContainerFeaturesResponse, String> {
+    let token = get_oci_token(
+        ghcr_registry(),
+        devcontainer_templates_repository(),
+        &client,
+    )
+    .await?;
 
-    let mut features_response =
-        get_devcontainer_features(&token.token, &manifest.layers[0].digest, &client).await?;
+    let manifest = get_latest_oci_manifest(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_features_repository(),
+        &client,
+        None,
+    )
+    .await?;
+
+    let mut features_response: DevContainerFeaturesResponse = get_deserializable_oci_blob(
+        &token.token,
+        ghcr_registry(),
+        devcontainer_features_repository(),
+        &manifest.layers[0].digest,
+        &client,
+    )
+    .await?;
 
     for feature in &mut features_response.features {
         feature.source_repository = Some(format!(
             "{}/{}",
-            ghcr_domain(),
+            ghcr_registry(),
             devcontainer_features_repository()
         ));
     }
     Ok(features_response)
 }
 
-async fn get_ghcr_token(client: &Arc<dyn HttpClient>) -> Result<GithubTokenResponse, String> {
-    let url = format!(
-        "{}/token?service=ghcr.io&scope=repository:{}:pull",
-        ghcr_url(),
-        devcontainer_templates_repository()
-    );
-    get_deserialized_response("", &url, client).await
-}
-
-async fn get_latest_feature_manifest(
-    token: &str,
-    client: &Arc<dyn HttpClient>,
-) -> Result<DockerManifestsResponse, String> {
-    let url = format!(
-        "{}/v2/{}/manifests/latest",
-        ghcr_url(),
-        devcontainer_features_repository()
-    );
-    get_deserialized_response(token, &url, client).await
-}
-
-async fn get_latest_manifest(
-    token: &str,
-    client: &Arc<dyn HttpClient>,
-) -> Result<DockerManifestsResponse, String> {
-    let url = format!(
-        "{}/v2/{}/manifests/latest",
-        ghcr_url(),
-        devcontainer_templates_repository()
-    );
-    get_deserialized_response(token, &url, client).await
-}
-
-async fn get_devcontainer_features(
-    token: &str,
-    blob_digest: &str,
-    client: &Arc<dyn HttpClient>,
-) -> Result<DevContainerFeaturesResponse, String> {
-    let url = format!(
-        "{}/v2/{}/blobs/{}",
-        ghcr_url(),
-        devcontainer_features_repository(),
-        blob_digest
-    );
-    get_deserialized_response(token, &url, client).await
-}
-
-async fn get_devcontainer_templates(
-    token: &str,
-    blob_digest: &str,
-    client: &Arc<dyn HttpClient>,
-) -> Result<DevContainerTemplatesResponse, String> {
-    let url = format!(
-        "{}/v2/{}/blobs/{}",
-        ghcr_url(),
-        devcontainer_templates_repository(),
-        blob_digest
-    );
-    get_deserialized_response(token, &url, client).await
-}
-
-async fn get_deserialized_response<T>(
-    token: &str,
-    url: &str,
-    client: &Arc<dyn HttpClient>,
-) -> Result<T, String>
-where
-    T: for<'de> Deserialize<'de>,
-{
-    let request = match Request::get(url)
-        .header("Authorization", format!("Bearer {}", token))
-        .header("Accept", "application/vnd.oci.image.manifest.v1+json")
-        .body(AsyncBody::default())
-    {
-        Ok(request) => request,
-        Err(e) => return Err(format!("Failed to create request: {}", e)),
-    };
-    let response = match client.send(request).await {
-        Ok(response) => response,
-        Err(e) => {
-            return Err(format!("Failed to send request: {}", e));
-        }
-    };
-
-    let mut output = String::new();
-
-    if let Err(e) = response.into_body().read_to_string(&mut output).await {
-        return Err(format!("Failed to read response body: {}", e));
-    };
-
-    match serde_json::from_str(&output) {
-        Ok(response) => Ok(response),
-        Err(e) => Err(format!("Failed to deserialize response: {}", e)),
-    }
-}
-
 #[cfg(test)]
 mod tests {
-    use gpui::TestAppContext;
     use http_client::{FakeHttpClient, anyhow};
 
     use crate::{
-        GithubTokenResponse, devcontainer_templates_repository, get_deserialized_response,
-        get_devcontainer_templates, get_ghcr_token, get_latest_manifest,
+        DevContainerTemplatesResponse, devcontainer_templates_repository,
+        get_deserializable_oci_blob, ghcr_registry,
     };
 
-    #[gpui::test]
-    async fn test_get_deserialized_response(_cx: &mut TestAppContext) {
-        let client = FakeHttpClient::create(|_request| async move {
-            Ok(http_client::Response::builder()
-                .status(200)
-                .body("{ \"token\": \"thisisatoken\" }".into())
-                .unwrap())
-        });
-
-        let response =
-            get_deserialized_response::<GithubTokenResponse>("", "https://ghcr.io/token", &client)
-                .await;
-        assert!(response.is_ok());
-        assert_eq!(response.unwrap().token, "thisisatoken".to_string())
-    }
-
-    #[gpui::test]
-    async fn test_get_ghcr_token() {
-        let client = FakeHttpClient::create(|request| async move {
-            let host = request.uri().host();
-            if host.is_none() || host.unwrap() != "ghcr.io" {
-                return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
-            }
-            let path = request.uri().path();
-            if path != "/token" {
-                return Err(anyhow!("Unexpected path: {}", path));
-            }
-            let query = request.uri().query();
-            if query.is_none()
-                || query.unwrap()
-                    != format!(
-                        "service=ghcr.io&scope=repository:{}:pull",
-                        devcontainer_templates_repository()
-                    )
-            {
-                return Err(anyhow!("Unexpected query: {}", query.unwrap_or_default()));
-            }
-            Ok(http_client::Response::builder()
-                .status(200)
-                .body("{ \"token\": \"thisisatoken\" }".into())
-                .unwrap())
-        });
-
-        let response = get_ghcr_token(&client).await;
-        assert!(response.is_ok());
-        assert_eq!(response.unwrap().token, "thisisatoken".to_string());
-    }
-
-    #[gpui::test]
-    async fn test_get_latest_manifests() {
-        let client = FakeHttpClient::create(|request| async move {
-            let host = request.uri().host();
-            if host.is_none() || host.unwrap() != "ghcr.io" {
-                return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
-            }
-            let path = request.uri().path();
-            if path
-                != format!(
-                    "/v2/{}/manifests/latest",
-                    devcontainer_templates_repository()
-                )
-            {
-                return Err(anyhow!("Unexpected path: {}", path));
-            }
-            Ok(http_client::Response::builder()
-                .status(200)
-                .body("{
-                    \"schemaVersion\": 2,
-                    \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",
-                    \"config\": {
-                        \"mediaType\": \"application/vnd.devcontainers\",
-                        \"digest\": \"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a\",
-                        \"size\": 2
-                    },
-                    \"layers\": [
-                        {
-                            \"mediaType\": \"application/vnd.devcontainers.collection.layer.v1+json\",
-                            \"digest\": \"sha256:035e9c9fd9bd61f6d3965fa4bf11f3ddfd2490a8cf324f152c13cc3724d67d09\",
-                            \"size\": 65235,
-                            \"annotations\": {
-                                \"org.opencontainers.image.title\": \"devcontainer-collection.json\"
-                            }
-                        }
-                    ],
-                    \"annotations\": {
-                        \"com.github.package.type\": \"devcontainer_collection\"
-                    }
-                }".into())
-                .unwrap())
-        });
-
-        let response = get_latest_manifest("", &client).await;
-        assert!(response.is_ok());
-        let response = response.unwrap();
-
-        assert_eq!(response.layers.len(), 1);
-        assert_eq!(
-            response.layers[0].digest,
-            "sha256:035e9c9fd9bd61f6d3965fa4bf11f3ddfd2490a8cf324f152c13cc3724d67d09"
-        );
-    }
-
     #[gpui::test]
     async fn test_get_devcontainer_templates() {
         let client = FakeHttpClient::create(|request| async move {
@@ -1872,8 +1743,10 @@ mod tests {
                 }".into())
                 .unwrap())
         });
-        let response = get_devcontainer_templates(
+        let response: Result<DevContainerTemplatesResponse, String> = get_deserializable_oci_blob(
             "",
+            ghcr_registry(),
+            devcontainer_templates_repository(),
             "sha256:035e9c9fd9bd61f6d3965fa4bf11f3ddfd2490a8cf324f152c13cc3724d67d09",
             &client,
         )

crates/dev_container/src/oci.rs 🔗

@@ -0,0 +1,470 @@
+use std::{path::PathBuf, pin::Pin, sync::Arc};
+
+use fs::Fs;
+use futures::{AsyncRead, AsyncReadExt, io::BufReader};
+use http::Request;
+use http_client::{AsyncBody, HttpClient};
+use serde::{Deserialize, Serialize};
+
+use crate::devcontainer_api::DevContainerError;
+
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TokenResponse {
    /// Bearer token returned by the registry's `/token` endpoint.
    pub(crate) token: String,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct DockerManifestsResponse {
    /// Content layers listed by the OCI image manifest; other manifest
    /// fields are ignored during deserialization.
    pub(crate) layers: Vec<ManifestLayer>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ManifestLayer {
    /// Content-addressable digest (e.g. `sha256:…`) used to fetch the blob.
    pub(crate) digest: String,
}
+
+/// Gets a bearer token for pulling from a container registry repository.
+///
+/// This uses the registry's `/token` endpoint directly, which works for
+/// `ghcr.io` and other registries that follow the same convention.  For
+/// registries that require a full `WWW-Authenticate` negotiation flow this
+/// would need to be extended.
+pub(crate) async fn get_oci_token(
+    registry: &str,
+    repository_path: &str,
+    client: &Arc<dyn HttpClient>,
+) -> Result<TokenResponse, String> {
+    let url = format!(
+        "https://{registry}/token?service={registry}&scope=repository:{repository_path}:pull",
+    );
+    log::debug!("Fetching OCI token from: {}", url);
+    get_deserialized_response("", &url, client)
+        .await
+        .map_err(|e| {
+            log::error!("OCI token request failed for {}: {e}", url);
+            e
+        })
+}
+
/// Fetches the `latest`-tagged OCI manifest for a repository.
///
/// Thin wrapper around [`get_oci_manifest`] with the version pinned to
/// `"latest"`; `id` optionally addresses a sub-repository.
pub(crate) async fn get_latest_oci_manifest(
    token: &str,
    registry: &str,
    repository_path: &str,
    client: &Arc<dyn HttpClient>,
    id: Option<&str>,
) -> Result<DockerManifestsResponse, String> {
    get_oci_manifest(registry, repository_path, token, client, "latest", id).await
}
+
+pub(crate) async fn get_oci_manifest(
+    registry: &str,
+    repository_path: &str,
+    token: &str,
+    client: &Arc<dyn HttpClient>,
+    version: &str,
+    id: Option<&str>,
+) -> Result<DockerManifestsResponse, String> {
+    let url = match id {
+        Some(id) => format!("https://{registry}/v2/{repository_path}/{id}/manifests/{version}"),
+        None => format!("https://{registry}/v2/{repository_path}/manifests/{version}"),
+    };
+
+    get_deserialized_response(token, &url, client).await
+}
+
+pub(crate) async fn get_deserializable_oci_blob<T>(
+    token: &str,
+    registry: &str,
+    repository_path: &str,
+    blob_digest: &str,
+    client: &Arc<dyn HttpClient>,
+) -> Result<T, String>
+where
+    T: for<'a> Deserialize<'a>,
+{
+    let url = format!("https://{registry}/v2/{repository_path}/blobs/{blob_digest}");
+    get_deserialized_response(token, &url, client).await
+}
+
+pub(crate) async fn download_oci_tarball(
+    token: &str,
+    registry: &str,
+    repository_path: &str,
+    blob_digest: &str,
+    accept_header: &str,
+    dest_dir: &PathBuf,
+    client: &Arc<dyn HttpClient>,
+    fs: &Arc<dyn Fs>,
+    id: Option<&str>,
+) -> Result<(), DevContainerError> {
+    let url = match id {
+        Some(id) => format!("https://{registry}/v2/{repository_path}/{id}/blobs/{blob_digest}"),
+        None => format!("https://{registry}/v2/{repository_path}/blobs/{blob_digest}"),
+    };
+
+    let request = Request::get(&url)
+        .header("Authorization", format!("Bearer {}", token))
+        .header("Accept", accept_header)
+        .body(AsyncBody::default())
+        .map_err(|e| {
+            log::error!("Failed to create blob request: {e}");
+            DevContainerError::ResourceFetchFailed
+        })?;
+
+    let mut response = client.send(request).await.map_err(|e| {
+        log::error!("Failed to download feature blob: {e}");
+        DevContainerError::ResourceFetchFailed
+    })?;
+    let status = response.status();
+
+    let body = BufReader::new(response.body_mut());
+
+    if !status.is_success() {
+        let body_text = String::from_utf8_lossy(body.buffer());
+        log::error!(
+            "Feature blob download returned HTTP {}: {}",
+            status.as_u16(),
+            body_text,
+        );
+        return Err(DevContainerError::ResourceFetchFailed);
+    }
+
+    futures::pin_mut!(body);
+    let body: Pin<&mut (dyn AsyncRead + Send)> = body;
+    let archive = async_tar::Archive::new(body);
+    fs.extract_tar_file(dest_dir, archive).await.map_err(|e| {
+        log::error!("Failed to extract feature tarball: {e}");
+        DevContainerError::FilesystemError
+    })?;
+
+    Ok(())
+}
+
+pub(crate) async fn get_deserialized_response<T>(
+    token: &str,
+    url: &str,
+    client: &Arc<dyn HttpClient>,
+) -> Result<T, String>
+where
+    T: for<'de> Deserialize<'de>,
+{
+    let request = match Request::get(url)
+        .header("Authorization", format!("Bearer {}", token))
+        .header("Accept", "application/vnd.oci.image.manifest.v1+json")
+        .body(AsyncBody::default())
+    {
+        Ok(request) => request,
+        Err(e) => return Err(format!("Failed to create request: {}", e)),
+    };
+    let response = match client.send(request).await {
+        Ok(response) => response,
+        Err(e) => {
+            return Err(format!("Failed to send request to {}: {}", url, e));
+        }
+    };
+
+    let status = response.status();
+    let mut output = String::new();
+
+    if let Err(e) = response.into_body().read_to_string(&mut output).await {
+        return Err(format!("Failed to read response body from {}: {}", url, e));
+    };
+
+    if !status.is_success() {
+        return Err(format!(
+            "OCI request to {} returned HTTP {}: {}",
+            url,
+            status.as_u16(),
+            &output[..output.len().min(500)],
+        ));
+    }
+
+    match serde_json_lenient::from_str(&output) {
+        Ok(response) => Ok(response),
+        Err(e) => Err(format!(
+            "Failed to deserialize response from {}: {} (body: {})",
+            url,
+            e,
+            &output[..output.len().min(500)],
+        )),
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::{path::PathBuf, sync::Arc};
+
+    use fs::{FakeFs, Fs};
+    use gpui::TestAppContext;
+    use http_client::{FakeHttpClient, anyhow};
+    use serde::Deserialize;
+
+    use crate::oci::{
+        TokenResponse, download_oci_tarball, get_deserializable_oci_blob,
+        get_deserialized_response, get_latest_oci_manifest, get_oci_token,
+    };
+
    /// Builds an in-memory tar archive resembling an extracted devcontainer
    /// template (a `devcontainer.json`, a dependabot config, and docs) for
    /// use as a fake registry blob in tests.
    async fn build_test_tarball() -> Vec<u8> {
        let devcontainer_json = concat!(
            "// For format details, see https://aka.ms/devcontainer.json. For config options, see the\n",
            "// README at: https://github.com/devcontainers/templates/tree/main/src/alpine\n",
            "{\n",
            "\t\"name\": \"Alpine\",\n",
            "\t// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile\n",
            "\t\"image\": \"mcr.microsoft.com/devcontainers/base:alpine-${templateOption:imageVariant}\"\n",
            "}\n",
        );

        let dependabot_yml = concat!(
            "version: 2\n",
            "updates:\n",
            " - package-ecosystem: \"devcontainers\"\n",
            "   directory: \"/\"\n",
            "   schedule:\n",
            "     interval: weekly\n",
        );

        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);

        // (archive path, file contents, unix mode) for every entry.
        let files: &[(&str, &[u8], u32)] = &[
            (
                ".devcontainer/devcontainer.json",
                devcontainer_json.as_bytes(),
                0o644,
            ),
            (".github/dependabot.yml", dependabot_yml.as_bytes(), 0o644),
            ("NOTES.md", b"Some notes", 0o644),
            ("README.md", b"# Alpine\n", 0o644),
        ];

        for (path, data, mode) in files {
            let mut header = async_tar::Header::new_gnu();
            header.set_size(data.len() as u64);
            header.set_mode(*mode);
            header.set_entry_type(async_tar::EntryType::Regular);
            header.set_cksum();
            builder.append_data(&mut header, path, *data).await.unwrap();
        }

        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
    /// Registry host the fake-HTTP tests below expect requests to target.
    fn test_oci_registry() -> &'static str {
        "ghcr.io"
    }
    /// Repository path the fake-HTTP tests below expect requests to target.
    fn test_oci_repository() -> &'static str {
        "repository"
    }
+
    /// Happy path: a 200 JSON body deserializes into the target type.
    #[gpui::test]
    async fn test_get_deserialized_response(_cx: &mut TestAppContext) {
        let client = FakeHttpClient::create(|_request| async move {
            Ok(http_client::Response::builder()
                .status(200)
                .body("{ \"token\": \"thisisatoken\" }".into())
                .unwrap())
        });

        let response =
            get_deserialized_response::<TokenResponse>("", "https://ghcr.io/token", &client).await;
        assert!(response.is_ok());
        assert_eq!(response.unwrap().token, "thisisatoken".to_string())
    }
+
    /// The token request must target the expected host and path with the
    /// `service`/`scope` query string; the fake client rejects anything else.
    #[gpui::test]
    async fn test_get_oci_token() {
        let client = FakeHttpClient::create(|request| async move {
            let host = request.uri().host();
            if host.is_none() || host.unwrap() != test_oci_registry() {
                return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
            }
            let path = request.uri().path();
            if path != "/token" {
                return Err(anyhow!("Unexpected path: {}", path));
            }
            let query = request.uri().query();
            if query.is_none()
                || query.unwrap()
                    != format!(
                        "service=ghcr.io&scope=repository:{}:pull",
                        test_oci_repository()
                    )
            {
                return Err(anyhow!("Unexpected query: {}", query.unwrap_or_default()));
            }
            Ok(http_client::Response::builder()
                .status(200)
                .body("{ \"token\": \"thisisatoken\" }".into())
                .unwrap())
        });

        let response = get_oci_token(test_oci_registry(), test_oci_repository(), &client).await;

        assert!(response.is_ok());
        assert_eq!(response.unwrap().token, "thisisatoken".to_string());
    }
+
    /// The manifest request must hit `/v2/{repo}/manifests/latest`, and the
    /// layer digests are deserialized from a realistic manifest payload.
    #[gpui::test]
    async fn test_get_latest_manifests() {
        let client = FakeHttpClient::create(|request| async move {
            let host = request.uri().host();
            if host.is_none() || host.unwrap() != test_oci_registry() {
                return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
            }
            let path = request.uri().path();
            if path != format!("/v2/{}/manifests/latest", test_oci_repository()) {
                return Err(anyhow!("Unexpected path: {}", path));
            }
            Ok(http_client::Response::builder()
                .status(200)
                .body("{
                    \"schemaVersion\": 2,
                    \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\",
                    \"config\": {
                        \"mediaType\": \"application/vnd.devcontainers\",
                        \"digest\": \"sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a\",
                        \"size\": 2
                    },
                    \"layers\": [
                        {
                            \"mediaType\": \"application/vnd.devcontainers.collection.layer.v1+json\",
                            \"digest\": \"sha256:035e9c9fd9bd61f6d3965fa4bf11f3ddfd2490a8cf324f152c13cc3724d67d09\",
                            \"size\": 65235,
                            \"annotations\": {
                                \"org.opencontainers.image.title\": \"devcontainer-collection.json\"
                            }
                        }
                    ],
                    \"annotations\": {
                        \"com.github.package.type\": \"devcontainer_collection\"
                    }
                }".into())
                .unwrap())
        });

        let response = get_latest_oci_manifest(
            "",
            test_oci_registry(),
            test_oci_repository(),
            &client,
            None,
        )
        .await;
        assert!(response.is_ok());
        let response = response.unwrap();

        assert_eq!(response.layers.len(), 1);
        assert_eq!(
            response.layers[0].digest,
            "sha256:035e9c9fd9bd61f6d3965fa4bf11f3ddfd2490a8cf324f152c13cc3724d67d09"
        );
    }
+
+    #[gpui::test]
+    async fn test_get_oci_blob() {
+        // Local stand-in for any serde-deserializable blob payload; lets the
+        // test pin the generic return type of `get_deserializable_oci_blob`.
+        #[derive(Debug, Deserialize)]
+        struct DeserializableTestStruct {
+            foo: String,
+        }
+
+        // Fake registry: only GET /v2/{repo}/blobs/blobdigest on the expected
+        // host is answered, with a small JSON document.
+        let client = FakeHttpClient::create(|request| async move {
+            let host = request.uri().host();
+            if host.is_none() || host.unwrap() != test_oci_registry() {
+                return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
+            }
+            let path = request.uri().path();
+            if path != format!("/v2/{}/blobs/blobdigest", test_oci_repository()) {
+                return Err(anyhow!("Unexpected path: {}", path));
+            }
+            Ok(http_client::Response::builder()
+                .status(200)
+                .body(
+                    r#"
+                    {
+                        "foo": "bar"
+                    }
+                    "#
+                    .into(),
+                )
+                .unwrap())
+        });
+
+        let response: Result<DeserializableTestStruct, String> = get_deserializable_oci_blob(
+            "",
+            test_oci_registry(),
+            test_oci_repository(),
+            "blobdigest",
+            &client,
+        )
+        .await;
+        assert!(response.is_ok());
+        let response = response.unwrap();
+
+        // The blob body must round-trip through serde into the target struct.
+        assert_eq!(response.foo, "bar".to_string());
+    }
+
+    #[gpui::test]
+    async fn test_download_oci_tarball(cx: &mut TestAppContext) {
+        // Tarball extraction does real (blocking) work, so let the test
+        // executor park instead of deadlocking.
+        cx.executor().allow_parking();
+        let fs: Arc<dyn Fs> = FakeFs::new(cx.executor());
+
+        let destination_dir = PathBuf::from("/tmp/extracted");
+        fs.create_dir(&destination_dir).await.unwrap();
+
+        // Share the tarball bytes with the fake client's 'static handler.
+        // (`Arc` is already imported above; no need for the full path.)
+        let tarball_bytes = build_test_tarball().await;
+        let tarball = Arc::new(tarball_bytes);
+
+        // Fake registry: only the expected blob path on the expected host is
+        // answered, with the raw tarball bytes as the response body.
+        let client = FakeHttpClient::create(move |request| {
+            let tarball = tarball.clone();
+            async move {
+                let host = request.uri().host();
+                if host.is_none() || host.unwrap() != test_oci_registry() {
+                    return Err(anyhow!("Unexpected host: {}", host.unwrap_or_default()));
+                }
+                let path = request.uri().path();
+                if path != format!("/v2/{}/blobs/blobdigest", test_oci_repository()) {
+                    return Err(anyhow!("Unexpected path: {}", path));
+                }
+                Ok(http_client::Response::builder()
+                    .status(200)
+                    .body(tarball.to_vec().into())
+                    .unwrap())
+            }
+        });
+
+        let response = download_oci_tarball(
+            "",
+            test_oci_registry(),
+            test_oci_repository(),
+            "blobdigest",
+            "header",
+            &destination_dir,
+            &client,
+            &fs,
+            None,
+        )
+        .await;
+        assert!(response.is_ok());
+
+        // The tarball built by `build_test_tarball` contains the Alpine
+        // template's devcontainer.json; verify it was extracted verbatim.
+        let expected_devcontainer_json = concat!(
+            "// For format details, see https://aka.ms/devcontainer.json. For config options, see the\n",
+            "// README at: https://github.com/devcontainers/templates/tree/main/src/alpine\n",
+            "{\n",
+            "\t\"name\": \"Alpine\",\n",
+            "\t// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile\n",
+            "\t\"image\": \"mcr.microsoft.com/devcontainers/base:alpine-${templateOption:imageVariant}\"\n",
+            "}\n",
+        );
+
+        assert_eq!(
+            fs.load(&destination_dir.join(".devcontainer/devcontainer.json"))
+                .await
+                .unwrap(),
+            expected_devcontainer_json
+        )
+    }
+}

crates/recent_projects/src/recent_projects.rs 🔗

@@ -2068,9 +2068,16 @@ mod tests {
             )
             .await;
 
+        // Open a file path (not a directory) so that the worktree root is a
+        // file. This means `active_project_directory` returns `None`, which
+        // causes `DevContainerContext::from_workspace` to return `None`,
+        // preventing `open_dev_container` from spawning real I/O (docker
+        // commands, shell environment loading) that is incompatible with the
+        // test scheduler. The modal is still created and the re-entrancy
+        // guard that this test validates is still exercised.
         cx.update(|cx| {
             open_paths(
-                &[PathBuf::from(path!("/project"))],
+                &[PathBuf::from(path!("/project/src/main.rs"))],
                 app_state,
                 workspace::OpenOptions::default(),
                 cx,

crates/recent_projects/src/remote_connections.rs 🔗

@@ -96,6 +96,7 @@ impl From<Connection> for RemoteConnectionOptions {
                     container_id: conn.container_id,
                     upload_binary_over_docker_exec: false,
                     use_podman: conn.use_podman,
+                    remote_env: conn.remote_env,
                 })
             }
         }

crates/recent_projects/src/remote_servers.rs 🔗

@@ -11,6 +11,7 @@ use dev_container::{
 };
 use editor::Editor;
 
+use extension_host::ExtensionStore;
 use futures::{FutureExt, channel::oneshot, future::Shared};
 use gpui::{
     Action, AnyElement, App, ClickEvent, ClipboardItem, Context, DismissEvent, Entity,
@@ -41,6 +42,7 @@ use std::{
         atomic::{self, AtomicUsize},
     },
 };
+
 use ui::{
     CommonAnimationExt, IconButtonShape, KeyBinding, List, ListItem, ListSeparator, Modal,
     ModalFooter, ModalHeader, Navigable, NavigableEntry, Section, Tooltip, WithScrollbar,
@@ -1854,10 +1856,13 @@ impl RemoteServerProjects {
     ) {
         let replace_window = window.window_handle().downcast::<MultiWorkspace>();
         let app_state = Arc::downgrade(&app_state);
+
         cx.spawn_in(window, async move |entity, cx| {
-            let (connection, starting_dir) =
-                match start_dev_container_with_config(context, config).await {
-                    Ok((c, s)) => (Connection::DevContainer(c), s),
+            let environment = context.environment(cx).await;
+
+            let (dev_container_connection, starting_dir) =
+                match start_dev_container_with_config(context, config, environment).await {
+                    Ok((c, s)) => (c, s),
                     Err(e) => {
                         log::error!("Failed to start dev container: {:?}", e);
                         cx.prompt(
@@ -1881,6 +1886,16 @@ impl RemoteServerProjects {
                         return;
                     }
                 };
+            cx.update(|_, cx| {
+                ExtensionStore::global(cx).update(cx, |this, cx| {
+                    for extension in &dev_container_connection.extension_ids {
+                        log::info!("Installing extension {extension} from devcontainer");
+                        this.install_latest_extension(Arc::from(extension.clone()), cx);
+                    }
+                })
+            })
+            .log_err();
+
             entity
                 .update(cx, |_, cx| {
                     cx.emit(DismissEvent);
@@ -1891,7 +1906,7 @@ impl RemoteServerProjects {
                 return;
             };
             let result = open_remote_project(
-                connection.into(),
+                Connection::DevContainer(dev_container_connection).into(),
                 vec![starting_dir].into_iter().map(PathBuf::from).collect(),
                 app_state,
                 OpenOptions {

crates/remote/src/transport/docker.rs 🔗

@@ -6,6 +6,7 @@ use collections::HashMap;
 use parking_lot::Mutex;
 use release_channel::{AppCommitSha, AppVersion, ReleaseChannel};
 use semver::Version as SemanticVersion;
+use std::collections::BTreeMap;
 use std::time::Instant;
 use std::{
     path::{Path, PathBuf},
@@ -36,6 +37,7 @@ pub struct DockerConnectionOptions {
     pub remote_user: String,
     pub upload_binary_over_docker_exec: bool,
     pub use_podman: bool,
+    pub remote_env: BTreeMap<String, String>,
 }
 
 pub(crate) struct DockerExecConnection {
@@ -499,10 +501,14 @@ impl DockerExecConnection {
         args.push("-u".to_string());
         args.push(self.connection_options.remote_user.clone());
 
+        for (k, v) in self.connection_options.remote_env.iter() {
+            args.push("-e".to_string());
+            args.push(format!("{k}={v}"));
+        }
+
         for (k, v) in env.iter() {
             args.push("-e".to_string());
-            let env_declaration = format!("{}={}", k, v);
-            args.push(env_declaration);
+            args.push(format!("{k}={v}"));
         }
 
         args.push(self.connection_options.container_id.clone());
@@ -632,6 +638,11 @@ impl RemoteConnection for DockerExecConnection {
         };
 
         let mut docker_args = vec!["exec".to_string()];
+
+        for (k, v) in self.connection_options.remote_env.iter() {
+            docker_args.push("-e".to_string());
+            docker_args.push(format!("{k}={v}"));
+        }
         for env_var in ["RUST_LOG", "RUST_BACKTRACE", "ZED_GENERATE_MINIDUMPS"] {
             if let Some(value) = std::env::var(env_var).ok() {
                 docker_args.push("-e".to_string());
@@ -768,9 +779,14 @@ impl RemoteConnection for DockerExecConnection {
             docker_args.push(parsed_working_dir);
         }
 
+        for (k, v) in self.connection_options.remote_env.iter() {
+            docker_args.push("-e".to_string());
+            docker_args.push(format!("{k}={v}"));
+        }
+
         for (k, v) in env.iter() {
             docker_args.push("-e".to_string());
-            docker_args.push(format!("{}={}", k, v));
+            docker_args.push(format!("{k}={v}"));
         }
 
         match interactive {

crates/settings_content/src/settings_content.rs 🔗

@@ -65,7 +65,8 @@ macro_rules! settings_overrides {
         }
     }
 }
-use std::collections::BTreeSet;
+use std::collections::{BTreeMap, BTreeSet};
+use std::hash::Hash;
 use std::sync::Arc;
 pub use util::serde::default_true;
 
@@ -1023,6 +1024,8 @@ pub struct DevContainerConnection {
     pub remote_user: String,
     pub container_id: String,
     pub use_podman: bool,
+    pub extension_ids: Vec<String>,
+    pub remote_env: BTreeMap<String, String>,
 }
 
 #[with_fallible_options]

crates/util/src/command.rs 🔗

@@ -68,6 +68,10 @@ impl Command {
         self
     }
 
+    /// Returns the arguments configured on this command so far, mirroring
+    /// `std::process::Command::get_args` (this delegates to the wrapped
+    /// command).
+    pub fn get_args(&self) -> impl Iterator<Item = &OsStr> {
+        self.0.get_args()
+    }
+
     pub fn env(&mut self, key: impl AsRef<OsStr>, val: impl AsRef<OsStr>) -> &mut Self {
         self.0.env(key, val);
         self
@@ -129,4 +133,8 @@ impl Command {
     pub async fn status(&mut self) -> std::io::Result<std::process::ExitStatus> {
         self.0.status().await
     }
+
+    /// Returns the program this command will run, mirroring
+    /// `std::process::Command::get_program` (this delegates to the wrapped
+    /// command).
+    pub fn get_program(&self) -> &OsStr {
+        self.0.get_program()
+    }
 }

crates/util/src/command/darwin.rs 🔗

@@ -104,6 +104,10 @@ impl Command {
         self
     }
 
+    /// Returns the arguments configured on this command so far, as `OsStr`
+    /// slices — same contract as `std::process::Command::get_args`.
+    pub fn get_args(&self) -> impl Iterator<Item = &OsStr> {
+        self.args.iter().map(|s| s.as_os_str())
+    }
+
     pub fn env(&mut self, key: impl AsRef<OsStr>, val: impl AsRef<OsStr>) -> &mut Self {
         self.envs
             .insert(key.as_ref().to_owned(), Some(val.as_ref().to_owned()));
@@ -217,6 +221,10 @@ impl Command {
         let mut child = self.spawn()?;
         child.status().await
     }
+
+    /// Returns the program this command will run — same contract as
+    /// `std::process::Command::get_program`.
+    pub fn get_program(&self) -> &OsStr {
+        self.program.as_os_str()
+    }
 }
 
 #[derive(Debug)]

crates/workspace/src/persistence.rs 🔗

@@ -971,6 +971,9 @@ impl Domain for WorkspaceDb {
         sql!(
             ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
         ),
+        sql!(
+            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
+        ),
     ];
 
     // Allow recovering from bad migration that was initially shipped to nightly
@@ -1500,6 +1503,7 @@ impl WorkspaceDb {
         let mut name = None;
         let mut container_id = None;
         let mut use_podman = None;
+        let mut remote_env = None;
         match options {
             RemoteConnectionOptions::Ssh(options) => {
                 kind = RemoteConnectionKind::Ssh;
@@ -1518,6 +1522,7 @@ impl WorkspaceDb {
                 name = Some(options.name);
                 use_podman = Some(options.use_podman);
                 user = Some(options.remote_user);
+                remote_env = serde_json::to_string(&options.remote_env).ok();
             }
             #[cfg(any(test, feature = "test-support"))]
             RemoteConnectionOptions::Mock(options) => {
@@ -1536,6 +1541,7 @@ impl WorkspaceDb {
             name,
             container_id,
             use_podman,
+            remote_env,
         )
     }
 
@@ -1549,6 +1555,7 @@ impl WorkspaceDb {
         name: Option<String>,
         container_id: Option<String>,
         use_podman: Option<bool>,
+        remote_env: Option<String>,
     ) -> Result<RemoteConnectionId> {
         if let Some(id) = this.select_row_bound(sql!(
             SELECT id
@@ -1582,8 +1589,9 @@ impl WorkspaceDb {
                     distro,
                     name,
                     container_id,
-                    use_podman
-                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)
+                    use_podman,
+                    remote_env
+                    ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                 RETURNING id
             ))?((
                 kind.serialize(),
@@ -1594,6 +1602,7 @@ impl WorkspaceDb {
                 name,
                 container_id,
                 use_podman,
+                remote_env,
             ))?
             .context("failed to insert remote project")?;
             Ok(RemoteConnectionId(id))
@@ -1695,13 +1704,13 @@ impl WorkspaceDb {
     fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
         Ok(self.select(sql!(
             SELECT
-                id, kind, host, port, user, distro, container_id, name, use_podman
+                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
             FROM
                 remote_connections
         ))?()?
         .into_iter()
         .filter_map(
-            |(id, kind, host, port, user, distro, container_id, name, use_podman)| {
+            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                 Some((
                     RemoteConnectionId(id),
                     Self::remote_connection_from_row(
@@ -1713,6 +1722,7 @@ impl WorkspaceDb {
                         container_id,
                         name,
                         use_podman,
+                        remote_env,
                     )?,
                 ))
             },
@@ -1724,9 +1734,9 @@ impl WorkspaceDb {
         &self,
         id: RemoteConnectionId,
     ) -> Result<RemoteConnectionOptions> {
-        let (kind, host, port, user, distro, container_id, name, use_podman) =
+        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
             self.select_row_bound(sql!(
-                SELECT kind, host, port, user, distro, container_id, name, use_podman
+                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                 FROM remote_connections
                 WHERE id = ?
             ))?(id.0)?
@@ -1740,6 +1750,7 @@ impl WorkspaceDb {
             container_id,
             name,
             use_podman,
+            remote_env,
         )
         .context("invalid remote_connection row")
     }
@@ -1753,6 +1764,7 @@ impl WorkspaceDb {
         container_id: Option<String>,
         name: Option<String>,
         use_podman: Option<bool>,
+        remote_env: Option<String>,
     ) -> Option<RemoteConnectionOptions> {
         match RemoteConnectionKind::deserialize(&kind)? {
             RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
@@ -1766,12 +1778,15 @@ impl WorkspaceDb {
                 ..Default::default()
             })),
             RemoteConnectionKind::Docker => {
+                // Backward compatibility: rows written before the
+                // `remote_env` column migration (SQLite backfills the new
+                // column with NULL) arrive here as `None`. The previous
+                // `serde_json::from_str(&remote_env?).ok()?` propagated that
+                // `None` out of the function, silently dropping every
+                // previously-saved Docker/dev-container connection. Treat a
+                // missing or unparsable value as an empty env map instead.
+                let remote_env: BTreeMap<String, String> = remote_env
+                    .as_deref()
+                    .and_then(|json| serde_json::from_str(json).ok())
+                    .unwrap_or_default();
                 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
                     container_id: container_id?,
                     name: name?,
                     remote_user: user?,
                     upload_binary_over_docker_exec: false,
                     use_podman: use_podman?,
+                    remote_env,
                 }))
             }
         }