devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use regex::Regex;
  10
  11use fs::Fs;
  12use http_client::HttpClient;
  13use util::{ResultExt, command::Command, normalize_path};
  14
  15use crate::{
  16    DevContainerConfig, DevContainerContext,
  17    command_json::{CommandRunner, DefaultCommandRunner},
  18    devcontainer_api::{DevContainerError, DevContainerUp},
  19    devcontainer_json::{
  20        ContainerBuild, DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort,
  21        MountDefinition, deserialize_devcontainer_json, deserialize_devcontainer_json_from_value,
  22        deserialize_devcontainer_json_to_value,
  23    },
  24    docker::{
  25        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  26        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  27    },
  28    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  29    get_oci_token,
  30    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  31    safe_id_lower,
  32};
  33
/// Tracks whether `${...}` variable expansion has been applied to the parsed
/// config yet; several operations refuse to run until it has (see the
/// `ConfigStatus::Deserialized(_)` guards below).
enum ConfigStatus {
    // Freshly deserialized; variables like `${localEnv:...}` are unexpanded.
    Deserialized(DevContainer),
    // Non-remote variables have been expanded (via `parse_nonremote_vars`).
    VariableParsed(DevContainer),
}
  38
/// A docker-compose based dev container's compose files together with their
/// parsed configuration.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Paths of the compose files involved in this configuration.
    files: Vec<PathBuf>,
    // Parsed compose configuration corresponding to `files`.
    config: DockerComposeConfig,
}
  44
/// Holds everything needed to turn a devcontainer.json into runnable
/// container resources: the parsed config, host-side paths/environment, the
/// clients used to fetch and build resources, and the artifacts produced by
/// `download_feature_and_dockerfile_resources`.
struct DevContainerManifest {
    // Used to fetch OCI tokens, manifests, and feature tarballs.
    http_client: Arc<dyn HttpClient>,
    // Filesystem abstraction for reading configs and writing build artifacts.
    fs: Arc<dyn Fs>,
    // Docker operations (image inspect, buildkit capability checks, ...).
    docker_client: Arc<dyn DockerClient>,
    // Runs external commands (not exercised in this portion of the file).
    command_runner: Arc<dyn CommandRunner>,
    // Original devcontainer.json text, before variable substitution.
    raw_config: String,
    // Parse state: raw vs. variable-expanded (see `ConfigStatus`).
    config: ConfigStatus,
    // Host environment, consulted for `${localEnv:...}` expansion.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the devcontainer config within `config_directory`.
    file_name: String,
    // Inspected base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths/tags for the features build; populated alongside `root_image`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
// Default directory inside the container under which project workspaces are
// placed (not referenced in this visible portion of the file).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
  61impl DevContainerManifest {
  62    async fn new(
  63        context: &DevContainerContext,
  64        environment: HashMap<String, String>,
  65        docker_client: Arc<dyn DockerClient>,
  66        command_runner: Arc<dyn CommandRunner>,
  67        local_config: DevContainerConfig,
  68        local_project_path: &Path,
  69    ) -> Result<Self, DevContainerError> {
  70        let config_path = local_project_path.join(local_config.config_path.clone());
  71        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  72        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  73            log::error!("Unable to read devcontainer contents: {e}");
  74            DevContainerError::DevContainerParseFailed
  75        })?;
  76
  77        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  78
  79        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  80            log::error!("Dev container file should be in a directory");
  81            DevContainerError::NotInValidProject
  82        })?;
  83        let file_name = config_path
  84            .file_name()
  85            .and_then(|f| f.to_str())
  86            .ok_or_else(|| {
  87                log::error!("Dev container file has no file name, or is invalid unicode");
  88                DevContainerError::DevContainerParseFailed
  89            })?;
  90
  91        Ok(Self {
  92            fs: context.fs.clone(),
  93            http_client: context.http_client.clone(),
  94            docker_client,
  95            command_runner,
  96            raw_config: devcontainer_contents,
  97            config: ConfigStatus::Deserialized(devcontainer),
  98            local_project_directory: local_project_path.to_path_buf(),
  99            local_environment: environment,
 100            config_directory: devcontainer_directory.to_path_buf(),
 101            file_name: file_name.to_string(),
 102            root_image: None,
 103            features_build_info: None,
 104            features: Vec::new(),
 105        })
 106    }
 107
 108    fn devcontainer_id(&self) -> String {
 109        let mut labels = self.identifying_labels();
 110        labels.sort_by_key(|(key, _)| *key);
 111
 112        let mut hasher = DefaultHasher::new();
 113        for (key, value) in &labels {
 114            key.hash(&mut hasher);
 115            value.hash(&mut hasher);
 116        }
 117
 118        format!("{:016x}", hasher.finish())
 119    }
 120
 121    fn identifying_labels(&self) -> Vec<(&str, String)> {
 122        let labels = vec![
 123            (
 124                "devcontainer.local_folder",
 125                (self.local_project_directory.display()).to_string(),
 126            ),
 127            (
 128                "devcontainer.config_file",
 129                (self.config_file().display()).to_string(),
 130            ),
 131        ];
 132        labels
 133    }
 134
    /// Expands the non-remote `${...}` variables (devcontainerId, workspace
    /// folder variants, and `${localEnv:...}`) in every string of the given
    /// JSON content, returning the substituted value tree.
    ///
    /// Container-side variables such as `${containerEnv:...}` are not handled
    /// here; they are expanded later (see `runtime_remote_env`).
    fn parse_nonremote_vars_for_content(
        &self,
        content: &str,
    ) -> Result<serde_json_lenient::Value, DevContainerError> {
        let mut value = deserialize_devcontainer_json_to_value(content)?;
        // Iterative depth-first walk; only string leaves are rewritten.
        let mut to_visit = vec![&mut value];

        while let Some(value) = to_visit.pop() {
            use serde_json_lenient::Value;

            match value {
                Value::String(string) => {
                    // The `...Basename` variables are replaced before their
                    // `...Folder` prefixes so the shorter pattern cannot
                    // clobber the longer one.
                    *string = string
                        .replace("${devcontainerId}", &self.devcontainer_id())
                        .replace(
                            "${containerWorkspaceFolderBasename}",
                            &self.remote_workspace_base_name().unwrap_or_default(),
                        )
                        .replace(
                            "${localWorkspaceFolderBasename}",
                            &self.local_workspace_base_name()?,
                        )
                        .replace(
                            "${containerWorkspaceFolder}",
                            &self
                                .remote_workspace_folder()
                                .map(|path| path.display().to_string())
                                .unwrap_or_default()
                                // Container paths are POSIX-style even when
                                // the host is Windows.
                                .replace('\\', "/"),
                        )
                        .replace(
                            "${localWorkspaceFolder}",
                            &self.local_workspace_folder().replace('\\', "/"),
                        );
                    // `${localEnv:VAR[:default]}` lookups against the host
                    // environment happen after the fixed substitutions above.
                    *string = Self::replace_environment_variables(
                        string,
                        "localEnv",
                        &self.local_environment,
                    );
                }

                Value::Array(array) => to_visit.extend(array.iter_mut()),
                Value::Object(object) => to_visit.extend(object.values_mut()),

                Value::Null | Value::Bool(_) | Value::Number(_) => {}
            }
        }

        Ok(value)
    }
 185
 186    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 187        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 188        let parsed_config = deserialize_devcontainer_json_from_value(replaced_content)?;
 189
 190        self.config = ConfigStatus::VariableParsed(parsed_config);
 191
 192        Ok(())
 193    }
 194
 195    fn runtime_remote_env(
 196        &self,
 197        container_env: &HashMap<String, String>,
 198    ) -> Result<HashMap<String, String>, DevContainerError> {
 199        let mut merged_remote_env = container_env.clone();
 200        // HOME is user-specific, and we will often not run as the image user
 201        merged_remote_env.remove("HOME");
 202        if let Some(mut remote_env) = self.dev_container().remote_env.clone() {
 203            remote_env.values_mut().for_each(|value| {
 204                *value = Self::replace_environment_variables(value, "containerEnv", &container_env)
 205            });
 206            for (k, v) in remote_env {
 207                merged_remote_env.insert(k, v);
 208            }
 209        }
 210        Ok(merged_remote_env)
 211    }
 212
 213    fn replace_environment_variables(
 214        mut orig: &str,
 215        environment_source: &str,
 216        environment: &HashMap<String, String>,
 217    ) -> String {
 218        let mut replaced = String::with_capacity(orig.len());
 219        let prefix = format!("${{{environment_source}:");
 220        while let Some(start) = orig.find(&prefix) {
 221            let var_name_start = start + prefix.len();
 222            let Some(end) = orig[var_name_start..].find('}') else {
 223                // No closing `}` => malformed variable reference => paste as is.
 224                break;
 225            };
 226            let end = var_name_start + end;
 227
 228            let (var_name_end, default_start) =
 229                if let Some(var_name_end) = orig[var_name_start..end].find(':') {
 230                    let var_name_end = var_name_start + var_name_end;
 231                    (var_name_end, var_name_end + 1)
 232                } else {
 233                    (end, end)
 234                };
 235
 236            let var_name = &orig[var_name_start..var_name_end];
 237            if var_name.is_empty() {
 238                // Empty variable name => paste as is.
 239                replaced.push_str(&orig[..end + 1]);
 240                orig = &orig[end + 1..];
 241                continue;
 242            }
 243            let default = &orig[default_start..end];
 244
 245            replaced.push_str(&orig[..start]);
 246            replaced.push_str(
 247                environment
 248                    .get(var_name)
 249                    .map(|value| value.as_str())
 250                    .unwrap_or(default),
 251            );
 252            orig = &orig[end + 1..];
 253        }
 254        replaced.push_str(orig);
 255        replaced
 256    }
 257
 258    fn config_file(&self) -> PathBuf {
 259        self.config_directory.join(&self.file_name)
 260    }
 261
 262    fn dev_container(&self) -> &DevContainer {
 263        match &self.config {
 264            ConfigStatus::Deserialized(dev_container) => dev_container,
 265            ConfigStatus::VariableParsed(dev_container) => dev_container,
 266        }
 267    }
 268
 269    async fn dockerfile_location(&self) -> Option<PathBuf> {
 270        let dev_container = self.dev_container();
 271        match dev_container.build_type() {
 272            DevContainerBuildType::Image(_) => None,
 273            DevContainerBuildType::Dockerfile(build) => {
 274                Some(self.config_directory.join(&build.dockerfile))
 275            }
 276            DevContainerBuildType::DockerCompose => {
 277                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 278                    return None;
 279                };
 280                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 281                else {
 282                    return None;
 283                };
 284                main_service.build.and_then(|b| {
 285                    let compose_file = docker_compose_manifest.files.first()?;
 286                    resolve_compose_dockerfile(
 287                        compose_file,
 288                        b.context.as_deref(),
 289                        b.dockerfile.as_deref()?,
 290                    )
 291                })
 292            }
 293            DevContainerBuildType::None => None,
 294        }
 295    }
 296
 297    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 298        let mut hasher = DefaultHasher::new();
 299        let prefix = match &self.dev_container().name {
 300            Some(name) => &safe_id_lower(name),
 301            None => "zed-dc",
 302        };
 303        let prefix = prefix.get(..6).unwrap_or(prefix);
 304
 305        dockerfile_build_path.hash(&mut hasher);
 306
 307        let hash = hasher.finish();
 308        format!("{}-{:x}-features", prefix, hash)
 309    }
 310
 311    /// Gets the base image from the devcontainer with the following precedence:
 312    /// - The devcontainer image if an image is specified
 313    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 314    /// - The image sourced in the docker-compose main service, if one is specified
 315    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 316    /// If no such image is available, return an error
 317    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 318        match self.dev_container().build_type() {
 319            DevContainerBuildType::Image(image) => {
 320                return Ok(image);
 321            }
 322            DevContainerBuildType::Dockerfile(build) => {
 323                let dockerfile_contents = self.expanded_dockerfile_content().await?;
 324                return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
 325                    || {
 326                        log::error!("Unable to find base image in Dockerfile");
 327                        DevContainerError::DevContainerParseFailed
 328                    },
 329                );
 330            }
 331            DevContainerBuildType::DockerCompose => {
 332                let docker_compose_manifest = self.docker_compose_manifest().await?;
 333                let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 334
 335                if let Some(_) = main_service
 336                    .build
 337                    .as_ref()
 338                    .and_then(|b| b.dockerfile.as_ref())
 339                {
 340                    let dockerfile_contents = self.expanded_dockerfile_content().await?;
 341                    return image_from_dockerfile(
 342                        dockerfile_contents,
 343                        &main_service.build.as_ref().and_then(|b| b.target.clone()),
 344                    )
 345                    .ok_or_else(|| {
 346                        log::error!("Unable to find base image in Dockerfile");
 347                        DevContainerError::DevContainerParseFailed
 348                    });
 349                }
 350                if let Some(image) = &main_service.image {
 351                    return Ok(image.to_string());
 352                }
 353
 354                log::error!("No valid base image found in docker-compose configuration");
 355                return Err(DevContainerError::DevContainerParseFailed);
 356            }
 357            DevContainerBuildType::None => {
 358                log::error!("Not a valid devcontainer config for build");
 359                return Err(DevContainerError::NotInValidProject);
 360            }
 361        }
 362    }
 363
    /// Downloads every OCI feature referenced by the config and writes the
    /// full features build context — per-feature content, env files, and the
    /// extended Dockerfile — into a temp directory, recording the results on
    /// `self` (`root_image`, `features_build_info`, `features`).
    ///
    /// # Errors
    /// Fails if variable expansion has not run yet, if the base image cannot
    /// be resolved/inspected, or on any filesystem or OCI fetch error.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        // Feature refs and paths may contain `${...}` substitutions, so the
        // config must already be variable-expanded.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch dir so successive builds do not collide; the
        // clock-before-epoch case degrades to timestamp 0 rather than failing.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // NOTE(review): unlike the features dir, "empty-folder" is shared
        // across runs — presumably intentional as a reusable no-op build
        // context; confirm.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // NOTE(review): `clone()` here is redundant — `display()` only
        // borrows the path.
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // Borrow the configured feature map, or an empty one (temporary
        // lifetime extension keeps the `HashMap::new()` alive).
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Baseline env consumed by the feature install scripts.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Apply any user-specified install ordering before downloading.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature mapped to `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Each feature gets its own directory, suffixed with the install
            // index so duplicate ids cannot collide.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI registry references are supported as feature sources.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Fetch: auth token -> manifest -> first layer tarball.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is downloaded — feature artifacts are a
            // single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also goes through non-remote variable
            // expansion before being deserialized.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_value(contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Materialize the feature's option env file and a wrapper script
            // that sources it before running the feature's installer.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit features are always assumed for plain docker builds; for
        // compose builds the client must declare support.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // A missing/unreadable user Dockerfile is non-fatal: the extended
        // Dockerfile then starts from the bare base image.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        // The build target comes from the compose primary service or the
        // devcontainer `build` section, depending on build type.
        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 621
    /// Renders the contents of `Dockerfile.extended`: the user's Dockerfile
    /// (if any), content-normalization stages, one install layer per feature,
    /// and a final stage that restores the image user. Output is returned as
    /// a string; the caller writes it to disk.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is disabled on Windows; elsewhere it defaults to on
        // unless the config opts out.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell commands that resolve each user's passwd entry (field 6 is
        // the home directory, extracted in the RUN below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content is staged through an intermediate
        // image stage instead of a build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    // NOTE(review): appended with no leading newline — this
                    // relies on the previous fragment ending in one; confirm
                    // `generate_dockerfile_env` output is newline-terminated.
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 713
 714    fn build_merged_resources(
 715        &self,
 716        base_image: DockerInspect,
 717    ) -> Result<DockerBuildResources, DevContainerError> {
 718        let dev_container = match &self.config {
 719            ConfigStatus::Deserialized(_) => {
 720                log::error!(
 721                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 722                );
 723                return Err(DevContainerError::DevContainerParseFailed);
 724            }
 725            ConfigStatus::VariableParsed(dev_container) => dev_container,
 726        };
 727        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 728
 729        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 730
 731        mounts.append(&mut feature_mounts);
 732
 733        let privileged = dev_container.privileged.unwrap_or(false)
 734            || self.features.iter().any(|f| f.privileged());
 735
 736        let mut entrypoint_script_lines = vec![
 737            "echo Container started".to_string(),
 738            "trap \"exit 0\" 15".to_string(),
 739        ];
 740
 741        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 742            entrypoint_script_lines.push(entrypoint.clone());
 743        }
 744        entrypoint_script_lines.append(&mut vec![
 745            "exec \"$@\"".to_string(),
 746            "while sleep 1 & wait $!; do :; done".to_string(),
 747        ]);
 748
 749        Ok(DockerBuildResources {
 750            image: base_image,
 751            additional_mounts: mounts,
 752            privileged,
 753            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 754        })
 755    }
 756
 757    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 758        if let ConfigStatus::Deserialized(_) = &self.config {
 759            log::error!(
 760                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 761            );
 762            return Err(DevContainerError::DevContainerParseFailed);
 763        }
 764        let dev_container = self.dev_container();
 765        match dev_container.build_type() {
 766            DevContainerBuildType::Image(base_image) => {
 767                let built_docker_image = self.build_docker_image().await?;
 768
 769                let built_docker_image = self
 770                    .update_remote_user_uid(built_docker_image, &base_image)
 771                    .await?;
 772
 773                let resources = self.build_merged_resources(built_docker_image)?;
 774                Ok(DevContainerBuildResources::Docker(resources))
 775            }
 776            DevContainerBuildType::Dockerfile(_) => {
 777                let built_docker_image = self.build_docker_image().await?;
 778                let Some(features_build_info) = &self.features_build_info else {
 779                    log::error!(
 780                        "Can't attempt to build update UID dockerfile before initial docker build"
 781                    );
 782                    return Err(DevContainerError::DevContainerParseFailed);
 783                };
 784                let built_docker_image = self
 785                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 786                    .await?;
 787
 788                let resources = self.build_merged_resources(built_docker_image)?;
 789                Ok(DevContainerBuildResources::Docker(resources))
 790            }
 791            DevContainerBuildType::DockerCompose => {
 792                log::debug!("Using docker compose. Building extended compose files");
 793                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 794
 795                return Ok(DevContainerBuildResources::DockerCompose(
 796                    docker_compose_resources,
 797                ));
 798            }
 799            DevContainerBuildType::None => {
 800                return Err(DevContainerError::DevContainerParseFailed);
 801            }
 802        }
 803    }
 804
 805    async fn run_dev_container(
 806        &self,
 807        build_resources: DevContainerBuildResources,
 808    ) -> Result<DevContainerUp, DevContainerError> {
 809        let ConfigStatus::VariableParsed(_) = &self.config else {
 810            log::error!(
 811                "Variables have not been parsed; cannot proceed with running the dev container"
 812            );
 813            return Err(DevContainerError::DevContainerParseFailed);
 814        };
 815        let running_container = match build_resources {
 816            DevContainerBuildResources::DockerCompose(resources) => {
 817                self.run_docker_compose(resources).await?
 818            }
 819            DevContainerBuildResources::Docker(resources) => {
 820                self.run_docker_image(resources).await?
 821            }
 822        };
 823
 824        let remote_user = get_remote_user_from_config(&running_container, self)?;
 825        let remote_workspace_folder = self.remote_workspace_folder()?;
 826
 827        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 828
 829        Ok(DevContainerUp {
 830            container_id: running_container.id,
 831            remote_user,
 832            remote_workspace_folder: remote_workspace_folder.display().to_string(),
 833            extension_ids: self.extension_ids(),
 834            remote_env,
 835        })
 836    }
 837
 838    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 839        let dev_container = match &self.config {
 840            ConfigStatus::Deserialized(_) => {
 841                log::error!(
 842                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 843                );
 844                return Err(DevContainerError::DevContainerParseFailed);
 845            }
 846            ConfigStatus::VariableParsed(dev_container) => dev_container,
 847        };
 848        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 849            return Err(DevContainerError::DevContainerParseFailed);
 850        };
 851        let docker_compose_full_paths = docker_compose_files
 852            .iter()
 853            .map(|relative| self.config_directory.join(relative))
 854            .collect::<Vec<PathBuf>>();
 855
 856        let Some(config) = self
 857            .docker_client
 858            .get_docker_compose_config(&docker_compose_full_paths)
 859            .await?
 860        else {
 861            log::error!("Output could not deserialize into DockerComposeConfig");
 862            return Err(DevContainerError::DevContainerParseFailed);
 863        };
 864        Ok(DockerComposeResources {
 865            files: docker_compose_full_paths,
 866            config,
 867        })
 868    }
 869
    /// Builds the feature-augmented image for a compose-based config and
    /// returns the compose file list extended with generated override files:
    /// a build override (when the image must be rebuilt with features) plus a
    /// runtime override (entrypoint, labels, mounts, ports).
    ///
    /// Requires variable expansion and `features_build_info` to already exist.
    async fn build_and_extend_compose_files(
        &self,
    ) -> Result<DockerComposeResources, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot build and extend compose files: features build info is not yet constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut docker_compose_resources = self.docker_compose_manifest().await?;
        // Without buildkit, additional build contexts are unavailable, so the
        // feature content must instead be baked into an intermediate image.
        let supports_buildkit = self.docker_client.supports_compose_buildkit();

        let (main_service_name, main_service) =
            find_primary_service(&docker_compose_resources, self)?;
        // NOTE(review): `.map(|b| b.dockerfile.as_ref()).is_some()` is true
        // whenever `build` is present, regardless of whether `dockerfile` is
        // set — the inner Option is never inspected. If the intent is "build
        // section with an explicit dockerfile", this should be
        // `.and_then(|b| b.dockerfile.as_ref()).is_some()`; confirm before
        // changing, since compose defaults a missing dockerfile to
        // `<context>/Dockerfile`, which may make the current behavior correct.
        let (built_service_image, built_service_image_tag) = if main_service
            .build
            .as_ref()
            .map(|b| b.dockerfile.as_ref())
            .is_some()
        {
            // Branch 1: the primary service builds from a dockerfile.
            if !supports_buildkit {
                self.build_feature_content_image().await?;
            }

            let dockerfile_path = &features_build_info.dockerfile_path;

            let build_args = if !supports_buildkit {
                HashMap::from([
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            } else {
                HashMap::from([
                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            };

            // With buildkit, the feature content is mounted as an additional
            // build context rather than baked into an intermediate image.
            let additional_contexts = if !supports_buildkit {
                None
            } else {
                Some(HashMap::from([(
                    "dev_containers_feature_content_source".to_string(),
                    features_build_info
                        .features_content_dir
                        .display()
                        .to_string(),
                )]))
            };

            // Override the primary service to build our generated dockerfile
            // (which layers features over the service's own build) and tag it
            // with the features image tag.
            let build_override = DockerComposeConfig {
                name: None,
                services: HashMap::from([(
                    main_service_name.clone(),
                    DockerComposeService {
                        image: Some(features_build_info.image_tag.clone()),
                        entrypoint: None,
                        cap_add: None,
                        security_opt: None,
                        labels: None,
                        build: Some(DockerComposeServiceBuild {
                            context: Some(
                                main_service
                                    .build
                                    .as_ref()
                                    .and_then(|b| b.context.clone())
                                    .unwrap_or_else(|| {
                                        features_build_info.empty_context_dir.display().to_string()
                                    }),
                            ),
                            dockerfile: Some(dockerfile_path.display().to_string()),
                            target: Some("dev_containers_target_stage".to_string()),
                            args: Some(build_args),
                            additional_contexts,
                        }),
                        volumes: Vec::new(),
                        ..Default::default()
                    },
                )]),
                volumes: HashMap::new(),
            };

            let temp_base = std::env::temp_dir().join("devcontainer-zed");
            let config_location = temp_base.join("docker_compose_build.json");

            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                log::error!("Error serializing docker compose runtime override: {e}");
                DevContainerError::DevContainerParseFailed
            })?;

            self.fs
                .write(&config_location, config_json.as_bytes())
                .await
                .map_err(|e| {
                    log::error!("Error writing the runtime override file: {e}");
                    DevContainerError::FilesystemError
                })?;

            docker_compose_resources.files.push(config_location);

            self.docker_client
                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                .await?;
            (
                self.docker_client
                    .inspect(&features_build_info.image_tag)
                    .await?,
                &features_build_info.image_tag,
            )
        } else if let Some(image) = &main_service.image {
            // Branch 2: the primary service references a prebuilt image.
            if dev_container
                .features
                .as_ref()
                .is_none_or(|features| features.is_empty())
            {
                // No features: use the referenced image directly.
                (self.docker_client.inspect(image).await?, image)
            } else {
                // NOTE(review): this arm duplicates the build-override
                // construction from branch 1 (only the base-image arg and the
                // build context differ); a shared helper would remove ~80
                // duplicated lines.
                if !supports_buildkit {
                    self.build_feature_content_image().await?;
                }

                let dockerfile_path = &features_build_info.dockerfile_path;

                let build_args = if !supports_buildkit {
                    HashMap::from([
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                } else {
                    HashMap::from([
                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                };

                let additional_contexts = if !supports_buildkit {
                    None
                } else {
                    Some(HashMap::from([(
                        "dev_containers_feature_content_source".to_string(),
                        features_build_info
                            .features_content_dir
                            .display()
                            .to_string(),
                    )]))
                };

                let build_override = DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        main_service_name.clone(),
                        DockerComposeService {
                            image: Some(features_build_info.image_tag.clone()),
                            entrypoint: None,
                            cap_add: None,
                            security_opt: None,
                            labels: None,
                            build: Some(DockerComposeServiceBuild {
                                context: Some(
                                    features_build_info.empty_context_dir.display().to_string(),
                                ),
                                dockerfile: Some(dockerfile_path.display().to_string()),
                                target: Some("dev_containers_target_stage".to_string()),
                                args: Some(build_args),
                                additional_contexts,
                            }),
                            volumes: Vec::new(),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                };

                let temp_base = std::env::temp_dir().join("devcontainer-zed");
                let config_location = temp_base.join("docker_compose_build.json");

                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                    log::error!("Error serializing docker compose runtime override: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;

                self.fs
                    .write(&config_location, config_json.as_bytes())
                    .await
                    .map_err(|e| {
                        log::error!("Error writing the runtime override file: {e}");
                        DevContainerError::FilesystemError
                    })?;

                docker_compose_resources.files.push(config_location);

                self.docker_client
                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                    .await?;

                (
                    self.docker_client
                        .inspect(&features_build_info.image_tag)
                        .await?,
                    &features_build_info.image_tag,
                )
            }
        } else {
            log::error!("Docker compose must have either image or dockerfile defined");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let built_service_image = self
            .update_remote_user_uid(built_service_image, built_service_image_tag)
            .await?;

        let resources = self.build_merged_resources(built_service_image)?;

        // If the primary service joins another service's network namespace
        // (network_mode: "service:<name>"), ports must be published there.
        let network_mode = main_service.network_mode.as_ref();
        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
        let runtime_override_file = self
            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
            .await?;

        docker_compose_resources.files.push(runtime_override_file);

        Ok(docker_compose_resources)
    }
1111
1112    async fn write_runtime_override_file(
1113        &self,
1114        main_service_name: &str,
1115        network_mode_service: Option<&str>,
1116        resources: DockerBuildResources,
1117    ) -> Result<PathBuf, DevContainerError> {
1118        let config =
1119            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1120        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1121        let config_location = temp_base.join("docker_compose_runtime.json");
1122
1123        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1124            log::error!("Error serializing docker compose runtime override: {e}");
1125            DevContainerError::DevContainerParseFailed
1126        })?;
1127
1128        self.fs
1129            .write(&config_location, config_json.as_bytes())
1130            .await
1131            .map_err(|e| {
1132                log::error!("Error writing the runtime override file: {e}");
1133                DevContainerError::FilesystemError
1134            })?;
1135
1136        Ok(config_location)
1137    }
1138
1139    fn build_runtime_override(
1140        &self,
1141        main_service_name: &str,
1142        network_mode_service: Option<&str>,
1143        resources: DockerBuildResources,
1144    ) -> Result<DockerComposeConfig, DevContainerError> {
1145        let mut runtime_labels = HashMap::new();
1146
1147        if let Some(metadata) = &resources.image.config.labels.metadata {
1148            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1149                log::error!("Error serializing docker image metadata: {e}");
1150                DevContainerError::ContainerNotValid(resources.image.id.clone())
1151            })?;
1152
1153            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1154        }
1155
1156        for (k, v) in self.identifying_labels() {
1157            runtime_labels.insert(k.to_string(), v.to_string());
1158        }
1159
1160        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1161            .additional_mounts
1162            .iter()
1163            .filter_map(|mount| {
1164                if let Some(mount_type) = &mount.mount_type
1165                    && mount_type.to_lowercase() == "volume"
1166                    && let Some(source) = &mount.source
1167                {
1168                    Some((
1169                        source.clone(),
1170                        DockerComposeVolume {
1171                            name: source.clone(),
1172                        },
1173                    ))
1174                } else {
1175                    None
1176                }
1177            })
1178            .collect();
1179
1180        let volumes: Vec<MountDefinition> = resources
1181            .additional_mounts
1182            .iter()
1183            .map(|v| MountDefinition {
1184                source: v.source.clone(),
1185                target: v.target.clone(),
1186                mount_type: v.mount_type.clone(),
1187            })
1188            .collect();
1189
1190        let mut main_service = DockerComposeService {
1191            entrypoint: Some(vec![
1192                "/bin/sh".to_string(),
1193                "-c".to_string(),
1194                resources.entrypoint_script,
1195                "-".to_string(),
1196            ]),
1197            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1198            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1199            labels: Some(runtime_labels),
1200            volumes,
1201            privileged: Some(resources.privileged),
1202            ..Default::default()
1203        };
1204        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1205        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1206        if let Some(forward_ports) = &self.dev_container().forward_ports {
1207            let main_service_ports: Vec<String> = forward_ports
1208                .iter()
1209                .filter_map(|f| match f {
1210                    ForwardPort::Number(port) => Some(port.to_string()),
1211                    ForwardPort::String(port) => {
1212                        let parts: Vec<&str> = port.split(":").collect();
1213                        if parts.len() <= 1 {
1214                            Some(port.to_string())
1215                        } else if parts.len() == 2 {
1216                            if parts[0] == main_service_name {
1217                                Some(parts[1].to_string())
1218                            } else {
1219                                None
1220                            }
1221                        } else {
1222                            None
1223                        }
1224                    }
1225                })
1226                .collect();
1227            for port in main_service_ports {
1228                // If the main service uses a different service's network bridge, append to that service's ports instead
1229                if let Some(network_service_name) = network_mode_service {
1230                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1231                        service.ports.push(DockerComposeServicePort {
1232                            target: port.clone(),
1233                            published: port.clone(),
1234                            ..Default::default()
1235                        });
1236                    } else {
1237                        service_declarations.insert(
1238                            network_service_name.to_string(),
1239                            DockerComposeService {
1240                                ports: vec![DockerComposeServicePort {
1241                                    target: port.clone(),
1242                                    published: port.clone(),
1243                                    ..Default::default()
1244                                }],
1245                                ..Default::default()
1246                            },
1247                        );
1248                    }
1249                } else {
1250                    main_service.ports.push(DockerComposeServicePort {
1251                        target: port.clone(),
1252                        published: port.clone(),
1253                        ..Default::default()
1254                    });
1255                }
1256            }
1257            let other_service_ports: Vec<(&str, &str)> = forward_ports
1258                .iter()
1259                .filter_map(|f| match f {
1260                    ForwardPort::Number(_) => None,
1261                    ForwardPort::String(port) => {
1262                        let parts: Vec<&str> = port.split(":").collect();
1263                        if parts.len() != 2 {
1264                            None
1265                        } else {
1266                            if parts[0] == main_service_name {
1267                                None
1268                            } else {
1269                                Some((parts[0], parts[1]))
1270                            }
1271                        }
1272                    }
1273                })
1274                .collect();
1275            for (service_name, port) in other_service_ports {
1276                if let Some(service) = service_declarations.get_mut(service_name) {
1277                    service.ports.push(DockerComposeServicePort {
1278                        target: port.to_string(),
1279                        published: port.to_string(),
1280                        ..Default::default()
1281                    });
1282                } else {
1283                    service_declarations.insert(
1284                        service_name.to_string(),
1285                        DockerComposeService {
1286                            ports: vec![DockerComposeServicePort {
1287                                target: port.to_string(),
1288                                published: port.to_string(),
1289                                ..Default::default()
1290                            }],
1291                            ..Default::default()
1292                        },
1293                    );
1294                }
1295            }
1296        }
1297
1298        service_declarations.insert(main_service_name.to_string(), main_service);
1299        let new_docker_compose_config = DockerComposeConfig {
1300            name: None,
1301            services: service_declarations,
1302            volumes: config_volumes,
1303        };
1304
1305        Ok(new_docker_compose_config)
1306    }
1307
1308    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1309        let dev_container = match &self.config {
1310            ConfigStatus::Deserialized(_) => {
1311                log::error!(
1312                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1313                );
1314                return Err(DevContainerError::DevContainerParseFailed);
1315            }
1316            ConfigStatus::VariableParsed(dev_container) => dev_container,
1317        };
1318
1319        match dev_container.build_type() {
1320            DevContainerBuildType::Image(image_tag) => {
1321                let base_image = self.docker_client.inspect(&image_tag).await?;
1322                if dev_container
1323                    .features
1324                    .as_ref()
1325                    .is_none_or(|features| features.is_empty())
1326                {
1327                    log::debug!("No features to add. Using base image");
1328                    return Ok(base_image);
1329                }
1330            }
1331            DevContainerBuildType::Dockerfile(_) => {}
1332            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1333                return Err(DevContainerError::DevContainerParseFailed);
1334            }
1335        };
1336
1337        let mut command = self.create_docker_build()?;
1338
1339        let output = self
1340            .command_runner
1341            .run_command(&mut command)
1342            .await
1343            .map_err(|e| {
1344                log::error!("Error building docker image: {e}");
1345                DevContainerError::CommandFailed(command.get_program().display().to_string())
1346            })?;
1347
1348        if !output.status.success() {
1349            let stderr = String::from_utf8_lossy(&output.stderr);
1350            log::error!("docker buildx build failed: {stderr}");
1351            return Err(DevContainerError::CommandFailed(
1352                command.get_program().display().to_string(),
1353            ));
1354        }
1355
1356        // After a successful build, inspect the newly tagged image to get its metadata
1357        let Some(features_build_info) = &self.features_build_info else {
1358            log::error!("Features build info expected, but not created");
1359            return Err(DevContainerError::DevContainerParseFailed);
1360        };
1361        let image = self
1362            .docker_client
1363            .inspect(&features_build_info.image_tag)
1364            .await?;
1365
1366        Ok(image)
1367    }
1368
    /// Windows stub: there is no host UID/GID to align the container user
    /// with, so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1377    #[cfg(not(target_os = "windows"))]
1378    async fn update_remote_user_uid(
1379        &self,
1380        image: DockerInspect,
1381        base_image: &str,
1382    ) -> Result<DockerInspect, DevContainerError> {
1383        let dev_container = self.dev_container();
1384
1385        let Some(features_build_info) = &self.features_build_info else {
1386            return Ok(image);
1387        };
1388
1389        // updateRemoteUserUID defaults to true per the devcontainers spec
1390        if dev_container.update_remote_user_uid == Some(false) {
1391            return Ok(image);
1392        }
1393
1394        let remote_user = get_remote_user_from_config(&image, self)?;
1395        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1396            return Ok(image);
1397        }
1398
1399        let image_user = image
1400            .config
1401            .image_user
1402            .as_deref()
1403            .unwrap_or("root")
1404            .to_string();
1405
1406        let host_uid = Command::new("id")
1407            .arg("-u")
1408            .output()
1409            .await
1410            .map_err(|e| {
1411                log::error!("Failed to get host UID: {e}");
1412                DevContainerError::CommandFailed("id -u".to_string())
1413            })
1414            .and_then(|output| {
1415                String::from_utf8_lossy(&output.stdout)
1416                    .trim()
1417                    .parse::<u32>()
1418                    .map_err(|e| {
1419                        log::error!("Failed to parse host UID: {e}");
1420                        DevContainerError::CommandFailed("id -u".to_string())
1421                    })
1422            })?;
1423
1424        let host_gid = Command::new("id")
1425            .arg("-g")
1426            .output()
1427            .await
1428            .map_err(|e| {
1429                log::error!("Failed to get host GID: {e}");
1430                DevContainerError::CommandFailed("id -g".to_string())
1431            })
1432            .and_then(|output| {
1433                String::from_utf8_lossy(&output.stdout)
1434                    .trim()
1435                    .parse::<u32>()
1436                    .map_err(|e| {
1437                        log::error!("Failed to parse host GID: {e}");
1438                        DevContainerError::CommandFailed("id -g".to_string())
1439                    })
1440            })?;
1441
1442        let dockerfile_content = self.generate_update_uid_dockerfile();
1443
1444        let dockerfile_path = features_build_info
1445            .features_content_dir
1446            .join("updateUID.Dockerfile");
1447        self.fs
1448            .write(&dockerfile_path, dockerfile_content.as_bytes())
1449            .await
1450            .map_err(|e| {
1451                log::error!("Failed to write updateUID Dockerfile: {e}");
1452                DevContainerError::FilesystemError
1453            })?;
1454
1455        let updated_image_tag = features_build_info.image_tag.clone();
1456
1457        let mut command = Command::new(self.docker_client.docker_cli());
1458        command.args(["build"]);
1459        command.args(["-f", &dockerfile_path.display().to_string()]);
1460        command.args(["-t", &updated_image_tag]);
1461        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1462        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1463        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1464        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1465        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1466        command.arg(features_build_info.empty_context_dir.display().to_string());
1467
1468        let output = self
1469            .command_runner
1470            .run_command(&mut command)
1471            .await
1472            .map_err(|e| {
1473                log::error!("Error building UID update image: {e}");
1474                DevContainerError::CommandFailed(command.get_program().display().to_string())
1475            })?;
1476
1477        if !output.status.success() {
1478            let stderr = String::from_utf8_lossy(&output.stderr);
1479            log::error!("UID update build failed: {stderr}");
1480            return Err(DevContainerError::CommandFailed(
1481                command.get_program().display().to_string(),
1482            ));
1483        }
1484
1485        self.docker_client.inspect(&updated_image_tag).await
1486    }
1487
    /// Generate a Dockerfile that remaps the remote user's UID/GID inside the
    /// image to the host user's ids (presumably so files in the bind-mounted
    /// workspace keep consistent ownership — TODO confirm against callers).
    ///
    /// The embedded shell script edits `/etc/passwd` / `/etc/group` in place:
    /// it skips the rewrite when the user is missing, the ids already match,
    /// or another user owns the target UID; when the target GID is taken it
    /// first moves that group to a free GID (searching downward from 65532),
    /// then rewrites the user's UID/GID and chowns the home folder.
    ///
    /// After the base layers, each feature's container-env layer and any
    /// `containerEnv` entries from the devcontainer config are appended as
    /// additional Dockerfile lines.
    #[cfg(not(target_os = "windows"))]
    fn generate_update_uid_dockerfile(&self) -> String {
        // Base image is injected via build args; we switch to root to edit the
        // passwd/group databases, then drop back to IMAGE_USER at the end.
        let mut dockerfile = r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#.to_string();
        // Append each feature's container-env layer.
        // NOTE(review): layers are joined with a single leading "\n"; if
        // `generate_dockerfile_env` does not emit a trailing newline, the first
        // `ENV` line appended below lands on the same line — confirm.
        for feature in &self.features {
            let container_env_layer = feature.generate_dockerfile_env();
            dockerfile = format!("{dockerfile}\n{container_env_layer}");
        }

        // Append `containerEnv` entries from the devcontainer config as ENV lines.
        if let Some(env) = &self.dev_container().container_env {
            for (key, value) in env {
                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
            }
        }
        dockerfile
    }
1541
1542    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1543        let Some(features_build_info) = &self.features_build_info else {
1544            log::error!("Features build info not available for building feature content image");
1545            return Err(DevContainerError::DevContainerParseFailed);
1546        };
1547        let features_content_dir = &features_build_info.features_content_dir;
1548
1549        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1550        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1551
1552        self.fs
1553            .write(&dockerfile_path, dockerfile_content.as_bytes())
1554            .await
1555            .map_err(|e| {
1556                log::error!("Failed to write feature content Dockerfile: {e}");
1557                DevContainerError::FilesystemError
1558            })?;
1559
1560        let mut command = Command::new(self.docker_client.docker_cli());
1561        command.args([
1562            "build",
1563            "-t",
1564            "dev_container_feature_content_temp",
1565            "-f",
1566            &dockerfile_path.display().to_string(),
1567            &features_content_dir.display().to_string(),
1568        ]);
1569
1570        let output = self
1571            .command_runner
1572            .run_command(&mut command)
1573            .await
1574            .map_err(|e| {
1575                log::error!("Error building feature content image: {e}");
1576                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1577            })?;
1578
1579        if !output.status.success() {
1580            let stderr = String::from_utf8_lossy(&output.stderr);
1581            log::error!("Feature content image build failed: {stderr}");
1582            return Err(DevContainerError::CommandFailed(
1583                self.docker_client.docker_cli(),
1584            ));
1585        }
1586
1587        Ok(())
1588    }
1589
1590    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1591        let dev_container = match &self.config {
1592            ConfigStatus::Deserialized(_) => {
1593                log::error!(
1594                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1595                );
1596                return Err(DevContainerError::DevContainerParseFailed);
1597            }
1598            ConfigStatus::VariableParsed(dev_container) => dev_container,
1599        };
1600
1601        let Some(features_build_info) = &self.features_build_info else {
1602            log::error!(
1603                "Cannot create docker build command; features build info has not been constructed"
1604            );
1605            return Err(DevContainerError::DevContainerParseFailed);
1606        };
1607        let mut command = Command::new(self.docker_client.docker_cli());
1608
1609        command.args(["buildx", "build"]);
1610
1611        // --load is short for --output=docker, loading the built image into the local docker images
1612        command.arg("--load");
1613
1614        // BuildKit build context: provides the features content directory as a named context
1615        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1616        command.args([
1617            "--build-context",
1618            &format!(
1619                "dev_containers_feature_content_source={}",
1620                features_build_info.features_content_dir.display()
1621            ),
1622        ]);
1623
1624        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1625        if let Some(build_image) = &features_build_info.build_image {
1626            command.args([
1627                "--build-arg",
1628                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1629            ]);
1630        } else {
1631            command.args([
1632                "--build-arg",
1633                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1634            ]);
1635        }
1636
1637        command.args([
1638            "--build-arg",
1639            &format!(
1640                "_DEV_CONTAINERS_IMAGE_USER={}",
1641                self.root_image
1642                    .as_ref()
1643                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1644                    .unwrap_or(&"root".to_string())
1645            ),
1646        ]);
1647
1648        command.args([
1649            "--build-arg",
1650            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1651        ]);
1652
1653        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1654            for (key, value) in args {
1655                command.args(["--build-arg", &format!("{}={}", key, value)]);
1656            }
1657        }
1658
1659        if let Some(options) = dev_container
1660            .build
1661            .as_ref()
1662            .and_then(|b| b.options.as_ref())
1663        {
1664            for option in options {
1665                command.arg(option);
1666            }
1667        }
1668
1669        if let Some(cache_from_images) = dev_container
1670            .build
1671            .as_ref()
1672            .and_then(|b| b.cache_from.as_ref())
1673        {
1674            for cache_from_image in cache_from_images {
1675                command.args(["--cache-from", cache_from_image]);
1676            }
1677        }
1678
1679        command.args(["--target", "dev_containers_target_stage"]);
1680
1681        command.args([
1682            "-f",
1683            &features_build_info.dockerfile_path.display().to_string(),
1684        ]);
1685
1686        command.args(["-t", &features_build_info.image_tag]);
1687
1688        if let DevContainerBuildType::Dockerfile(build) = dev_container.build_type() {
1689            command.arg(self.calculate_context_dir(build).display().to_string());
1690        } else {
1691            // Use an empty folder as the build context to avoid pulling in unneeded files.
1692            // The actual feature content is supplied via the BuildKit build context above.
1693            command.arg(features_build_info.empty_context_dir.display().to_string());
1694        }
1695
1696        Ok(command)
1697    }
1698
1699    async fn run_docker_compose(
1700        &self,
1701        resources: DockerComposeResources,
1702    ) -> Result<DockerInspect, DevContainerError> {
1703        let mut command = Command::new(self.docker_client.docker_cli());
1704        command.args(&["compose", "--project-name", &self.project_name()]);
1705        for docker_compose_file in resources.files {
1706            command.args(&["-f", &docker_compose_file.display().to_string()]);
1707        }
1708        command.args(&["up", "-d"]);
1709
1710        let output = self
1711            .command_runner
1712            .run_command(&mut command)
1713            .await
1714            .map_err(|e| {
1715                log::error!("Error running docker compose up: {e}");
1716                DevContainerError::CommandFailed(command.get_program().display().to_string())
1717            })?;
1718
1719        if !output.status.success() {
1720            let stderr = String::from_utf8_lossy(&output.stderr);
1721            log::error!("Non-success status from docker compose up: {}", stderr);
1722            return Err(DevContainerError::CommandFailed(
1723                command.get_program().display().to_string(),
1724            ));
1725        }
1726
1727        if let Some(docker_ps) = self.check_for_existing_container().await? {
1728            log::debug!("Found newly created dev container");
1729            return self.docker_client.inspect(&docker_ps.id).await;
1730        }
1731
1732        log::error!("Could not find existing container after docker compose up");
1733
1734        Err(DevContainerError::DevContainerParseFailed)
1735    }
1736
1737    async fn run_docker_image(
1738        &self,
1739        build_resources: DockerBuildResources,
1740    ) -> Result<DockerInspect, DevContainerError> {
1741        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1742
1743        let output = self
1744            .command_runner
1745            .run_command(&mut docker_run_command)
1746            .await
1747            .map_err(|e| {
1748                log::error!("Error running docker run: {e}");
1749                DevContainerError::CommandFailed(
1750                    docker_run_command.get_program().display().to_string(),
1751                )
1752            })?;
1753
1754        if !output.status.success() {
1755            let std_err = String::from_utf8_lossy(&output.stderr);
1756            log::error!("Non-success status from docker run. StdErr: {std_err}");
1757            return Err(DevContainerError::CommandFailed(
1758                docker_run_command.get_program().display().to_string(),
1759            ));
1760        }
1761
1762        log::debug!("Checking for container that was started");
1763        let Some(docker_ps) = self.check_for_existing_container().await? else {
1764            log::error!("Could not locate container just created");
1765            return Err(DevContainerError::DevContainerParseFailed);
1766        };
1767        self.docker_client.inspect(&docker_ps.id).await
1768    }
1769
1770    fn local_workspace_folder(&self) -> String {
1771        self.local_project_directory.display().to_string()
1772    }
1773    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1774        self.local_project_directory
1775            .file_name()
1776            .map(|f| f.display().to_string())
1777            .ok_or(DevContainerError::DevContainerParseFailed)
1778    }
1779
1780    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1781        self.dev_container()
1782            .workspace_folder
1783            .as_ref()
1784            .map(|folder| PathBuf::from(folder))
1785            .or(Some(
1786                // We explicitly use "/" here, instead of PathBuf::join
1787                // because we want remote targets to use unix-style filepaths,
1788                // even on a Windows host
1789                PathBuf::from(format!(
1790                    "{}/{}",
1791                    DEFAULT_REMOTE_PROJECT_DIR,
1792                    self.local_workspace_base_name()?
1793                )),
1794            ))
1795            .ok_or(DevContainerError::DevContainerParseFailed)
1796    }
1797    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1798        self.remote_workspace_folder().and_then(|f| {
1799            f.file_name()
1800                .map(|file_name| file_name.display().to_string())
1801                .ok_or(DevContainerError::DevContainerParseFailed)
1802        })
1803    }
1804
1805    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1806        if let Some(mount) = &self.dev_container().workspace_mount {
1807            return Ok(mount.clone());
1808        }
1809        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1810            return Err(DevContainerError::DevContainerParseFailed);
1811        };
1812
1813        Ok(MountDefinition {
1814            source: Some(self.local_workspace_folder()),
1815            // We explicitly use "/" here, instead of PathBuf::join
1816            // because we want the remote target to use unix-style filepaths,
1817            // even on a Windows host
1818            target: format!(
1819                "{}/{}",
1820                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1821                project_directory_name.display()
1822            ),
1823            mount_type: None,
1824        })
1825    }
1826
    /// Build the full `docker run` invocation for a single-container
    /// (non-compose) dev container: user runArgs, workspace and additional
    /// mounts, identifying labels, forwarded ports, and a detached
    /// `/bin/sh -c <entrypoint_script>` entrypoint.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // User-supplied runArgs are passed through verbatim, ahead of our own
        // defaults so `run_if_missing` below can respect user overrides.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        for arg in run_args {
            command.arg(arg);
        }

        // Adds `arg` only when no user runArg already starts with `arg_name`.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        // Podman-specific defaults (security labeling / user namespace),
        // applied only when the user has not set them explicitly.
        if &docker_cli == "podman" {
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        // Run detached; the started container is located afterwards via labels.
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to re-identify this container
        // (see `check_for_existing_container`).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata as a label on the container.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Publish numeric forwardPorts as identical host:container pairs;
        // non-numeric variants are not handled here.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Override the image entrypoint so our entrypoint script runs via
        // `/bin/sh -c <script> -` regardless of the image's own entrypoint.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1921
1922    fn extension_ids(&self) -> Vec<String> {
1923        self.dev_container()
1924            .customizations
1925            .as_ref()
1926            .map(|c| c.zed.extensions.clone())
1927            .unwrap_or_default()
1928    }
1929
1930    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1931        self.dev_container().validate_devcontainer_contents()?;
1932
1933        self.run_initialize_commands().await?;
1934
1935        self.download_feature_and_dockerfile_resources().await?;
1936
1937        let build_resources = self.build_resources().await?;
1938
1939        let devcontainer_up = self.run_dev_container(build_resources).await?;
1940
1941        self.run_remote_scripts(&devcontainer_up, true).await?;
1942
1943        Ok(devcontainer_up)
1944    }
1945
1946    async fn run_remote_scripts(
1947        &self,
1948        devcontainer_up: &DevContainerUp,
1949        new_container: bool,
1950    ) -> Result<(), DevContainerError> {
1951        let ConfigStatus::VariableParsed(config) = &self.config else {
1952            log::error!("Config not yet parsed, cannot proceed with remote scripts");
1953            return Err(DevContainerError::DevContainerScriptsFailed);
1954        };
1955        let remote_folder = self.remote_workspace_folder()?.display().to_string();
1956
1957        if new_container {
1958            if let Some(on_create_command) = &config.on_create_command {
1959                for (command_name, command) in on_create_command.script_commands() {
1960                    log::debug!("Running on create command {command_name}");
1961                    self.docker_client
1962                        .run_docker_exec(
1963                            &devcontainer_up.container_id,
1964                            &remote_folder,
1965                            &devcontainer_up.remote_user,
1966                            &devcontainer_up.remote_env,
1967                            command,
1968                        )
1969                        .await?;
1970                }
1971            }
1972            if let Some(update_content_command) = &config.update_content_command {
1973                for (command_name, command) in update_content_command.script_commands() {
1974                    log::debug!("Running update content command {command_name}");
1975                    self.docker_client
1976                        .run_docker_exec(
1977                            &devcontainer_up.container_id,
1978                            &remote_folder,
1979                            &devcontainer_up.remote_user,
1980                            &devcontainer_up.remote_env,
1981                            command,
1982                        )
1983                        .await?;
1984                }
1985            }
1986
1987            if let Some(post_create_command) = &config.post_create_command {
1988                for (command_name, command) in post_create_command.script_commands() {
1989                    log::debug!("Running post create command {command_name}");
1990                    self.docker_client
1991                        .run_docker_exec(
1992                            &devcontainer_up.container_id,
1993                            &remote_folder,
1994                            &devcontainer_up.remote_user,
1995                            &devcontainer_up.remote_env,
1996                            command,
1997                        )
1998                        .await?;
1999                }
2000            }
2001            if let Some(post_start_command) = &config.post_start_command {
2002                for (command_name, command) in post_start_command.script_commands() {
2003                    log::debug!("Running post start command {command_name}");
2004                    self.docker_client
2005                        .run_docker_exec(
2006                            &devcontainer_up.container_id,
2007                            &remote_folder,
2008                            &devcontainer_up.remote_user,
2009                            &devcontainer_up.remote_env,
2010                            command,
2011                        )
2012                        .await?;
2013                }
2014            }
2015        }
2016        if let Some(post_attach_command) = &config.post_attach_command {
2017            for (command_name, command) in post_attach_command.script_commands() {
2018                log::debug!("Running post attach command {command_name}");
2019                self.docker_client
2020                    .run_docker_exec(
2021                        &devcontainer_up.container_id,
2022                        &remote_folder,
2023                        &devcontainer_up.remote_user,
2024                        &devcontainer_up.remote_env,
2025                        command,
2026                    )
2027                    .await?;
2028            }
2029        }
2030
2031        Ok(())
2032    }
2033
2034    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
2035        let ConfigStatus::VariableParsed(config) = &self.config else {
2036            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
2037            return Err(DevContainerError::DevContainerParseFailed);
2038        };
2039
2040        if let Some(initialize_command) = &config.initialize_command {
2041            log::debug!("Running initialize command");
2042            initialize_command
2043                .run(&self.command_runner, &self.local_project_directory)
2044                .await
2045        } else {
2046            log::warn!("No initialize command found");
2047            Ok(())
2048        }
2049    }
2050
2051    async fn check_for_existing_devcontainer(
2052        &self,
2053    ) -> Result<Option<DevContainerUp>, DevContainerError> {
2054        if let Some(docker_ps) = self.check_for_existing_container().await? {
2055            log::debug!("Dev container already found. Proceeding with it");
2056
2057            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
2058
2059            if !docker_inspect.is_running() {
2060                log::debug!("Container not running. Will attempt to start, and then proceed");
2061                self.docker_client.start_container(&docker_ps.id).await?;
2062            }
2063
2064            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
2065
2066            let remote_folder = self.remote_workspace_folder()?;
2067
2068            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
2069
2070            let dev_container_up = DevContainerUp {
2071                container_id: docker_ps.id,
2072                remote_user: remote_user,
2073                remote_workspace_folder: remote_folder.display().to_string(),
2074                extension_ids: self.extension_ids(),
2075                remote_env,
2076            };
2077
2078            self.run_remote_scripts(&dev_container_up, false).await?;
2079
2080            Ok(Some(dev_container_up))
2081        } else {
2082            log::debug!("Existing container not found.");
2083
2084            Ok(None)
2085        }
2086    }
2087
2088    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2089        self.docker_client
2090            .find_process_by_filters(
2091                self.identifying_labels()
2092                    .iter()
2093                    .map(|(k, v)| format!("label={k}={v}"))
2094                    .collect(),
2095            )
2096            .await
2097    }
2098
2099    fn project_name(&self) -> String {
2100        if let Some(name) = &self.dev_container().name {
2101            safe_id_lower(name)
2102        } else {
2103            let alternate_name = &self
2104                .local_workspace_base_name()
2105                .unwrap_or(self.local_workspace_folder());
2106            safe_id_lower(alternate_name)
2107        }
2108    }
2109
    /// Load the config's Dockerfile and textually expand `${VAR}` references,
    /// using (in precedence order) build args from the devcontainer/compose
    /// config, then defaults declared by earlier `ARG` directives in the
    /// Dockerfile itself.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // For docker-compose configs the build args live on the primary
        // compose service rather than on dev_container.build.
        let devcontainer_args = match self.dev_container().build_type() {
            DevContainerBuildType::DockerCompose => {
                let compose = self.docker_compose_manifest().await?;
                find_primary_service(&compose, self)?
                    .1
                    .build
                    .and_then(|b| b.args)
                    .unwrap_or_default()
            }
            _ => self
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.args.clone())
                .unwrap_or_default(),
        };
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // ARG defaults collected so far; each applies only to lines after the
        // ARG directive that introduced it.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches `KEY=` tokens (start-of-string or whitespace-delimited) so a
        // single `ARG A=1 B=2` line can declare several defaults.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            // Record defaults from this line's ARG directive (parsed after the
            // substitutions above, so overrides are already applied).
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // A value runs from just after `KEY=` up to the start of
                    // the next `KEY=` token, or the end of the line.
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes; the length
                    // check keeps a lone `"` from being treated as a pair.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2180
2181    fn calculate_context_dir(&self, build: ContainerBuild) -> PathBuf {
2182        let Some(context) = build.context else {
2183            return self.config_directory.clone();
2184        };
2185        let context_path = PathBuf::from(context);
2186
2187        if context_path.is_absolute() {
2188            context_path
2189        } else {
2190            self.config_directory.join(context_path)
2191        }
2192    }
2193}
2194
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// Constructed during resource download and consumed by `create_docker_build`
/// (and the UID-update / feature-content image builds).
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    /// (keeps unrelated files out of the build when no user context applies)
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` when building from a Dockerfile stage instead of an image
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2213
2214pub(crate) async fn read_devcontainer_configuration(
2215    config: DevContainerConfig,
2216    context: &DevContainerContext,
2217    environment: HashMap<String, String>,
2218) -> Result<DevContainer, DevContainerError> {
2219    let docker = if context.use_podman {
2220        Docker::new("podman").await
2221    } else {
2222        Docker::new("docker").await
2223    };
2224    let mut dev_container = DevContainerManifest::new(
2225        context,
2226        environment,
2227        Arc::new(docker),
2228        Arc::new(DefaultCommandRunner::new()),
2229        config,
2230        &context.project_directory.as_ref(),
2231    )
2232    .await?;
2233    dev_container.parse_nonremote_vars()?;
2234    Ok(dev_container.dev_container().clone())
2235}
2236
2237pub(crate) async fn spawn_dev_container(
2238    context: &DevContainerContext,
2239    environment: HashMap<String, String>,
2240    config: DevContainerConfig,
2241    local_project_path: &Path,
2242) -> Result<DevContainerUp, DevContainerError> {
2243    let docker = if context.use_podman {
2244        Docker::new("podman").await
2245    } else {
2246        Docker::new("docker").await
2247    };
2248    let mut devcontainer_manifest = DevContainerManifest::new(
2249        context,
2250        environment,
2251        Arc::new(docker),
2252        Arc::new(DefaultCommandRunner::new()),
2253        config,
2254        local_project_path,
2255    )
2256    .await?;
2257
2258    devcontainer_manifest.parse_nonremote_vars()?;
2259
2260    log::debug!("Checking for existing container");
2261    if let Some(devcontainer) = devcontainer_manifest
2262        .check_for_existing_devcontainer()
2263        .await?
2264    {
2265        Ok(devcontainer)
2266    } else {
2267        log::debug!("Existing container not found. Building");
2268
2269        devcontainer_manifest.build_and_run().await
2270    }
2271}
2272
/// Everything needed to assemble the final `docker run` invocation for a
/// plain (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be started from.
    image: DockerInspect,
    /// Mounts to pass in addition to the workspace bind mount.
    additional_mounts: Vec<MountDefinition>,
    // Presumably mapped to `--privileged` on the run command — confirm in
    // create_docker_run_command.
    privileged: bool,
    /// Script body handed to `/bin/sh -c` as the container entrypoint
    /// (see the create_docker_run_command test below).
    entrypoint_script: String,
}
2280
/// Build resources split by backend: a Docker Compose project or a plain
/// `docker run` invocation.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2286
2287fn find_primary_service(
2288    docker_compose: &DockerComposeResources,
2289    devcontainer: &DevContainerManifest,
2290) -> Result<(String, DockerComposeService), DevContainerError> {
2291    let Some(service_name) = &devcontainer.dev_container().service else {
2292        return Err(DevContainerError::DevContainerParseFailed);
2293    };
2294
2295    match docker_compose.config.services.get(service_name) {
2296        Some(service) => Ok((service_name.clone(), service.clone())),
2297        None => Err(DevContainerError::DevContainerParseFailed),
2298    }
2299}
2300
2301/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2302/// `dockerfile` is relative to the build `context`, and `context` is relative to
2303/// the compose file's directory.
2304fn resolve_compose_dockerfile(
2305    compose_file: &Path,
2306    context: Option<&str>,
2307    dockerfile: &str,
2308) -> Option<PathBuf> {
2309    let dockerfile = PathBuf::from(dockerfile);
2310    if dockerfile.is_absolute() {
2311        return Some(dockerfile);
2312    }
2313    let compose_dir = compose_file.parent()?;
2314    let context_dir = match context {
2315        Some(ctx) => {
2316            let ctx = PathBuf::from(ctx);
2317            if ctx.is_absolute() {
2318                ctx
2319            } else {
2320                normalize_path(&compose_dir.join(ctx))
2321            }
2322        }
2323        None => compose_dir.to_path_buf(),
2324    };
2325    Some(context_dir.join(dockerfile))
2326}
2327
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
// NOTE(review): keep this value in sync with the upstream CLI constant of the
// same name.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2331
/// Escapes regex special characters in a string.
///
/// Every character from the special set `.*+?^${}()|[]\` is prefixed with a
/// backslash; all other characters pass through unchanged.
fn escape_regex_chars(input: &str) -> String {
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, ch| {
            if SPECIAL.contains(ch) {
                escaped.push('\\');
            }
            escaped.push(ch);
            escaped
        })
}
2343
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Drop a trailing digest (`@…`) first; otherwise drop a tag, i.e. a `:`
    // that comes after the final `/` (so a registry port such as
    // `localhost:5000/…` is left intact).
    let trimmed = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short ID is the final path segment.
    trimmed.rsplit('/').next().unwrap_or(trimmed)
}
2367
2368/// Generates a shell command that looks up a user's passwd entry.
2369///
2370/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2371/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2372fn get_ent_passwd_shell_command(user: &str) -> String {
2373    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2374    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2375    format!(
2376        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2377        shell = escaped_for_shell,
2378        re = escaped_for_regex,
2379    )
2380}
2381
2382/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2383///
2384/// Features listed in the override come first (in the specified order), followed
2385/// by any remaining features sorted lexicographically by their full reference ID.
2386fn resolve_feature_order<'a>(
2387    features: &'a HashMap<String, FeatureOptions>,
2388    override_order: &Option<Vec<String>>,
2389) -> Vec<(&'a String, &'a FeatureOptions)> {
2390    if let Some(order) = override_order {
2391        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2392        for ordered_id in order {
2393            if let Some((key, options)) = features.get_key_value(ordered_id) {
2394                ordered.push((key, options));
2395            }
2396        }
2397        let mut remaining: Vec<_> = features
2398            .iter()
2399            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2400            .collect();
2401        remaining.sort_by_key(|(id, _)| id.as_str());
2402        ordered.extend(remaining);
2403        ordered
2404    } else {
2405        let mut entries: Vec<_> = features.iter().collect();
2406        entries.sort_by_key(|(id, _)| id.as_str());
2407        entries
2408    }
2409}
2410
2411/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
2412///
2413/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
2414/// `containerFeaturesConfiguration.ts`.
2415fn generate_install_wrapper(
2416    feature_ref: &str,
2417    feature_id: &str,
2418    env_variables: &str,
2419) -> Result<String, DevContainerError> {
2420    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
2421        log::error!("Error escaping feature ref {feature_ref}: {e}");
2422        DevContainerError::DevContainerParseFailed
2423    })?;
2424    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
2425        log::error!("Error escaping feature {feature_id}: {e}");
2426        DevContainerError::DevContainerParseFailed
2427    })?;
2428    let options_indented: String = env_variables
2429        .lines()
2430        .filter(|l| !l.is_empty())
2431        .map(|l| format!("    {}", l))
2432        .collect::<Vec<_>>()
2433        .join("\n");
2434    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
2435        log::error!("Error escaping options {options_indented}: {e}");
2436        DevContainerError::DevContainerParseFailed
2437    })?;
2438
2439    let script = format!(
2440        r#"#!/bin/sh
2441set -e
2442
2443on_exit () {{
2444    [ $? -eq 0 ] && exit
2445    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
2446}}
2447
2448trap on_exit EXIT
2449
2450echo ===========================================================================
2451echo 'Feature       : {escaped_name}'
2452echo 'Id            : {escaped_id}'
2453echo 'Options       :'
2454echo {escaped_options}
2455echo ===========================================================================
2456
2457set -a
2458. ../devcontainer-features.builtin.env
2459. ./devcontainer-features.env
2460set +a
2461
2462chmod +x ./install.sh
2463./install.sh
2464"#
2465    );
2466
2467    Ok(script)
2468}
2469
/// Ensures the Dockerfile stage we want to extend is addressable as `alias`.
///
/// If the selected `FROM` stage already has an `AS` name, a new one-line stage
/// `FROM <existing> AS <alias>` is appended to the content; otherwise the
/// selected `FROM` line itself is rewritten in place with ` AS <alias>`.
/// When no `FROM` line matches (or `build_target` names no stage), the
/// content is returned unchanged.
fn dockerfile_inject_alias(
    dockerfile_content: &str,
    alias: &str,
    build_target: Option<String>,
) -> String {
    // Extract the stage name from a "FROM image AS name" line, if present.
    fn stage_alias(line: &str) -> Option<&str> {
        let tokens: Vec<&str> = line.split_whitespace().collect();
        if tokens.len() >= 3 && tokens[tokens.len() - 2].eq_ignore_ascii_case("as") {
            tokens.last().copied()
        } else {
            None
        }
    }

    // All FROM instructions paired with their line index.
    let mut from_instructions: Vec<(usize, &str)> = Vec::new();
    for (idx, line) in dockerfile_content.lines().enumerate() {
        if line.starts_with("FROM") {
            from_instructions.push((idx, line));
        }
    }

    // Pick the stage to alias: the named target if requested, else the last FROM.
    let chosen = match build_target.as_deref() {
        Some(target) => from_instructions.iter().rev().find(|(_, line)| {
            stage_alias(line).map_or(false, |name| name.eq_ignore_ascii_case(target))
        }),
        None => from_instructions.last(),
    };
    let Some(&(target_idx, target_line)) = chosen else {
        return dockerfile_content.to_string();
    };

    if let Some(existing) = stage_alias(target_line) {
        // Already named: derive a fresh stage from it at the end of the file.
        format!("{dockerfile_content}\nFROM {existing} AS {alias}")
    } else {
        // Unnamed: rewrite that FROM line with the alias appended, preserving a
        // trailing newline if the input had one.
        let mut rebuilt = dockerfile_content
            .lines()
            .enumerate()
            .map(|(i, line)| {
                if i == target_idx {
                    format!("{line} AS {alias}")
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n");
        if dockerfile_content.ends_with('\n') {
            rebuilt.push('\n');
        }
        rebuilt
    }
}
2529
/// Returns the image referenced by the relevant `FROM` line of a Dockerfile.
///
/// With `target == None` the final `FROM` line is used; with a target, the
/// last `FROM … AS <target>` stage (matched case-insensitively) is used.
/// Returns `None` when no `FROM` line matches.
///
/// Fix: tokenize with `split_whitespace` instead of `split(' ')`, so tabs and
/// runs of spaces between tokens are handled correctly (previously
/// `FROM  img` yielded `Some("")` and `FROM\timg` yielded `None`) — now
/// consistent with `dockerfile_inject_alias`.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match target {
            Some(target) => {
                let parts: Vec<&str> = from_line.split_whitespace().collect();
                parts.len() >= 3
                    && parts[parts.len() - 2].eq_ignore_ascii_case("as")
                    && parts
                        .last()
                        .map_or(false, |p| p.eq_ignore_ascii_case(target))
            }
            None => true,
        })
        .and_then(|from_line| from_line.split_whitespace().nth(1).map(|s| s.to_string()))
}
2555
2556fn get_remote_user_from_config(
2557    docker_config: &DockerInspect,
2558    devcontainer: &DevContainerManifest,
2559) -> Result<String, DevContainerError> {
2560    if let DevContainer {
2561        remote_user: Some(user),
2562        ..
2563    } = &devcontainer.dev_container()
2564    {
2565        return Ok(user.clone());
2566    }
2567    if let Some(metadata) = &docker_config.config.labels.metadata {
2568        for metadatum in metadata {
2569            if let Some(remote_user) = metadatum.get("remoteUser") {
2570                if let Some(remote_user_str) = remote_user.as_str() {
2571                    return Ok(remote_user_str.to_string());
2572                }
2573            }
2574        }
2575    }
2576    if let Some(image_user) = &docker_config.config.image_user {
2577        if !image_user.is_empty() {
2578            return Ok(image_user.to_string());
2579        }
2580    }
2581    Ok("root".to_string())
2582}
2583
2584// This should come from spec - see the docs
2585fn get_container_user_from_config(
2586    docker_config: &DockerInspect,
2587    devcontainer: &DevContainerManifest,
2588) -> Result<String, DevContainerError> {
2589    if let Some(user) = &devcontainer.dev_container().container_user {
2590        return Ok(user.to_string());
2591    }
2592    if let Some(metadata) = &docker_config.config.labels.metadata {
2593        for metadatum in metadata {
2594            if let Some(container_user) = metadatum.get("containerUser") {
2595                if let Some(container_user_str) = container_user.as_str() {
2596                    return Ok(container_user_str.to_string());
2597                }
2598            }
2599        }
2600    }
2601    if let Some(image_user) = &docker_config.config.image_user {
2602        return Ok(image_user.to_string());
2603    }
2604
2605    Ok("root".to_string())
2606}
2607
2608#[cfg(test)]
2609mod test {
2610    use std::{
2611        collections::HashMap,
2612        ffi::OsStr,
2613        path::{Path, PathBuf},
2614        process::{ExitStatus, Output},
2615        sync::{Arc, Mutex},
2616    };
2617
2618    use async_trait::async_trait;
2619    use fs::{FakeFs, Fs};
2620    use gpui::{AppContext, TestAppContext};
2621    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2622    use project::{
2623        ProjectEnvironment,
2624        worktree_store::{WorktreeIdCounter, WorktreeStore},
2625    };
2626    use serde_json_lenient::Value;
2627    use util::{command::Command, paths::SanitizedPath};
2628
2629    #[cfg(not(target_os = "windows"))]
2630    use crate::docker::DockerComposeServicePort;
2631    use crate::{
2632        DevContainerConfig, DevContainerContext,
2633        command_json::CommandRunner,
2634        devcontainer_api::DevContainerError,
2635        devcontainer_json::MountDefinition,
2636        devcontainer_manifest::{
2637            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2638            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2639            image_from_dockerfile, resolve_compose_dockerfile,
2640        },
2641        docker::{
2642            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2643            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2644            DockerPs,
2645        },
2646        oci::TokenResponse,
2647    };
    // Absolute project path used by every manifest test; platform-specific so
    // path handling is exercised on both Unix and Windows layouts.
    #[cfg(not(target_os = "windows"))]
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
    #[cfg(target_os = "windows")]
    // NOTE(review): inside a raw string, `\\` remains two literal backslashes,
    // so this path has a doubled separator after "C:" — confirm intended.
    const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2652
    /// Builds an in-memory GNU tar archive from `(name, contents)` pairs.
    ///
    /// An empty `contents` string produces a directory entry; anything else
    /// becomes a regular file. All entries get mode 0755.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Directory entry.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                // Regular file entry.
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2683
2684    fn test_project_filename() -> String {
2685        PathBuf::from(TEST_PROJECT_PATH)
2686            .file_name()
2687            .expect("is valid")
2688            .display()
2689            .to_string()
2690    }
2691
    /// Writes `devcontainer_contents` to
    /// `<TEST_PROJECT_PATH>/.devcontainer/devcontainer.json` on the fake
    /// filesystem and returns the default devcontainer config.
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2704
    /// Fake collaborators backing a manifest under test, returned to the test
    /// so it can program or inspect them after construction.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Leading underscore: retained but not inspected directly by tests.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2711
    /// Convenience wrapper around `init_devcontainer_manifest` that wires up
    /// all-default fakes (fs, http client, docker, command runner) and an
    /// empty environment.
    async fn init_default_devcontainer_manifest(
        cx: &mut TestAppContext,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let fs = FakeFs::new(cx.executor());
        let http_client = fake_http_client();
        let command_runner = Arc::new(TestCommandRunner::new());
        let docker = Arc::new(FakeDocker::new());
        let environment = HashMap::new();

        init_devcontainer_manifest(
            cx,
            fs,
            http_client,
            docker,
            command_runner,
            environment,
            devcontainer_contents,
        )
        .await
    }
2733
    /// Builds a `DevContainerManifest` against the given fakes, returning the
    /// fakes alongside it so tests can assert against them afterwards.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Write devcontainer.json under the fake project directory first.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep clones of the fakes so the caller can inspect them later.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2776
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        // devcontainer.json sets "remoteUser": "root"; that explicit setting
        // must take precedence over the image metadata label built below.
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Competing metadata label that would yield "vsCode" if consulted.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2816
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        // Empty devcontainer.json: the remote user must come from the image's
        // devcontainer metadata label instead.
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2845
2846    #[test]
2847    fn should_extract_feature_id_from_references() {
2848        assert_eq!(
2849            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2850            "aws-cli"
2851        );
2852        assert_eq!(
2853            extract_feature_id("ghcr.io/devcontainers/features/go"),
2854            "go"
2855        );
2856        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2857        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2858        assert_eq!(
2859            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2860            "rust"
2861        );
2862    }
2863
    /// Verifies the exact argv produced for `docker run`: workspace bind mount,
    /// devcontainer labels, and entrypoint wiring.
    #[gpui::test]
    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
        // NOTE(review): this `metadata` map is never used below — it looks like
        // copy/paste leftover from the remote-user tests.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );

        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"{
                    "name": "TODO"
                }"#,
        )
        .await
        .unwrap();
        let build_resources = DockerBuildResources {
            image: DockerInspect {
                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
                config: DockerInspectConfig {
                    labels: DockerConfigLabels { metadata: None },
                    image_user: None,
                    env: Vec::new(),
                },
                mounts: None,
                state: None,
            },
            additional_mounts: vec![],
            privileged: false,
            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
        };
        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);

        assert!(docker_run_command.is_ok());
        let docker_run_command = docker_run_command.expect("ok");

        assert_eq!(docker_run_command.get_program(), "docker");
        // The config_file label points at the devcontainer.json in the project.
        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("devcontainer.json");
        let expected_config_file_label = expected_config_file_label.display();
        assert_eq!(
            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
            vec![
                OsStr::new("run"),
                OsStr::new("--sig-proxy=false"),
                OsStr::new("-d"),
                OsStr::new("--mount"),
                OsStr::new(&format!(
                    "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
                )),
                OsStr::new("-l"),
                OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
                OsStr::new("-l"),
                OsStr::new(&format!(
                    "devcontainer.config_file={expected_config_file_label}"
                )),
                OsStr::new("--entrypoint"),
                OsStr::new("/bin/sh"),
                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
                OsStr::new("-c"),
                OsStr::new(
                    "
    echo Container started
    trap \"exit 0\" 15
    exec \"$@\"
    while sleep 1 & wait $!; do :; done
                        "
                    .trim()
                ),
                OsStr::new("-"),
            ]
        )
    }
2938
    /// Covers the three lookup states for the compose primary service:
    /// no service configured, service missing from compose, service present.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2998
    /// End-to-end check of every variable form `parse_nonremote_vars`
    /// supports: ${devcontainerId}, ${containerWorkspaceFolder[Basename]},
    /// ${localWorkspaceFolder[Basename]}, and ${localEnv:*} (set, missing,
    /// and with a default).
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}",
        "LOCAL_ENV_VAR_3": "before-${localEnv:missing_local_env}-after",
        "LOCAL_ENV_VAR_4": "${localEnv:with_defaults:default}"

    }
}
                    "#;
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            // Local environment the ${localEnv:*} lookups resolve against.
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
        // Missing variables are replaced with the empty string.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_3")),
            Some(&"before--after".to_string())
        );
        // ${localEnv:NAME:default} falls back to the default when unset.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_4")),
            Some(&"default".to_string())
        );
    }
3128
3129    #[test]
3130    fn test_replace_environment_variables() {
3131        let replaced = DevContainerManifest::replace_environment_variables(
3132            "before ${containerEnv:FOUND} middle ${containerEnv:MISSING:default-value} after${containerEnv:MISSING2}",
3133            "containerEnv",
3134            &HashMap::from([("FOUND".to_string(), "value".to_string())]),
3135        );
3136
3137        assert_eq!(replaced, "before value middle default-value after");
3138    }
3139
3140    #[test]
3141    fn test_replace_environment_variables_supports_defaults_with_colons() {
3142        let replaced = DevContainerManifest::replace_environment_variables(
3143            "before ${containerEnv:MISSING:one:two} after",
3144            "containerEnv",
3145            &HashMap::new(),
3146        );
3147
3148        assert_eq!(replaced, "before one:two after");
3149    }
3150
3151    #[gpui::test]
3152    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
3153        let given_devcontainer_contents = r#"
3154                // These are some external comments. serde_lenient should handle them
3155                {
3156                    // These are some internal comments
3157                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
3158                    "name": "myDevContainer-${devcontainerId}",
3159                    "remoteUser": "root",
3160                    "remoteEnv": {
3161                        "DEVCONTAINER_ID": "${devcontainerId}",
3162                        "MYVAR2": "myvarothervalue",
3163                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
3164                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
3165                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
3166                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"
3167
3168                    },
3169                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
3170                    "workspaceFolder": "/workspace/customfolder"
3171                }
3172            "#;
3173
3174        let (_, mut devcontainer_manifest) =
3175            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3176                .await
3177                .unwrap();
3178
3179        devcontainer_manifest.parse_nonremote_vars().unwrap();
3180
3181        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
3182            &devcontainer_manifest.config
3183        else {
3184            panic!("Config not parsed");
3185        };
3186
3187        // ${devcontainerId}
3188        let devcontainer_id = devcontainer_manifest.devcontainer_id();
3189        assert_eq!(
3190            variable_replaced_devcontainer.name,
3191            Some(format!("myDevContainer-{devcontainer_id}"))
3192        );
3193        assert_eq!(
3194            variable_replaced_devcontainer
3195                .remote_env
3196                .as_ref()
3197                .and_then(|env| env.get("DEVCONTAINER_ID")),
3198            Some(&devcontainer_id)
3199        );
3200
3201        // ${containerWorkspaceFolderBasename}
3202        assert_eq!(
3203            variable_replaced_devcontainer
3204                .remote_env
3205                .as_ref()
3206                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
3207            Some(&"customfolder".to_string())
3208        );
3209
3210        // ${localWorkspaceFolderBasename}
3211        assert_eq!(
3212            variable_replaced_devcontainer
3213                .remote_env
3214                .as_ref()
3215                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
3216            Some(&"project".to_string())
3217        );
3218
3219        // ${containerWorkspaceFolder}
3220        assert_eq!(
3221            variable_replaced_devcontainer
3222                .remote_env
3223                .as_ref()
3224                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
3225            Some(&"/workspace/customfolder".to_string())
3226        );
3227
3228        // ${localWorkspaceFolder}
3229        assert_eq!(
3230            variable_replaced_devcontainer
3231                .remote_env
3232                .as_ref()
3233                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
3234            // We replace backslashes with forward slashes during variable replacement for JSON safety
3235            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
3236        );
3237    }
3238
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        // End-to-end test: a Dockerfile-based devcontainer with two OCI features
        // (docker-in-docker, go) should produce the generated Dockerfile.extended,
        // the updateUID.Dockerfile, the per-feature install wrapper scripts, and
        // the expected `docker run` invocation plus exec environment.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            /*---------------------------------------------------------------------------------------------
             *  Copyright (c) Microsoft Corporation. All rights reserved.
             *  Licensed under the MIT License. See License.txt in the project root for license information.
             *--------------------------------------------------------------------------------------------*/
            {
              "name": "cli-${devcontainerId}",
              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                  "VARIANT": "18-bookworm",
                  "FOO": "bar",
                },
              },
              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
              "workspaceFolder": "/workspace2",
              "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
              ],

              "runArgs": [
                "--cap-add=SYS_PTRACE",
                "--sig-proxy=true",
              ],

              "forwardPorts": [
                8082,
                8083,
              ],
              "appPort": [
                8084,
                "8085:8086",
              ],

              "containerEnv": {
                "VARIABLE_VALUE": "value",
              },

              "initializeCommand": "touch IAM.md",

              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

              "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
              },

              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

              "remoteUser": "node",

              "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
              },

              "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                  "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
              },

              "customizations": {
                "vscode": {
                  "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                  ],
                },
                "zed": {
                  "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                  "repositories": {
                    "devcontainers/features": {
                      "permissions": {
                        "contents": "write",
                        "workflows": "write",
                      },
                    },
                  },
                },
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by "build.dockerfile" into the fake fs.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions should surface in the result;
        // "vscode" and "codespaces" customizations are ignored.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The feature build should wrap the user's Dockerfile in extra stages
        // that normalize, copy, and run each feature's install script.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The UID-update Dockerfile should rewrite the remote user's UID/GID to
        // match the host user, and append the feature-contributed ENV lines.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Each feature gets a generated install wrapper; check the go feature's
        // wrapper (identified by its "/go_" staging directory).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // Inspect the recorded `docker run` invocation: runArgs, mounts
        // (workspace, explicit, feature-contributed dind volume), labels,
        // forwarded/app ports, and the keep-alive entrypoint must all appear
        // in this exact order.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every exec in the container should receive the parsed remoteEnv,
        // with ${containerEnv:PATH} resolved against the container's PATH.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3613
3614    // updateRemoteUserUID is treated as false in Windows, so this test will fail
3615    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
3616    #[cfg(not(target_os = "windows"))]
3617    #[gpui::test]
3618    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
3619        cx.executor().allow_parking();
3620        env_logger::try_init().ok();
3621        let given_devcontainer_contents = r#"
3622            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3623            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3624            {
3625              "features": {
3626                "ghcr.io/devcontainers/features/aws-cli:1": {},
3627                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3628              },
3629              "name": "Rust and PostgreSQL",
3630              "dockerComposeFile": "docker-compose.yml",
3631              "service": "app",
3632              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3633
3634              // Features to add to the dev container. More info: https://containers.dev/features.
3635              // "features": {},
3636
3637              // Use 'forwardPorts' to make a list of ports inside the container available locally.
3638              "forwardPorts": [
3639                8083,
3640                "db:5432",
3641                "db:1234",
3642              ],
3643
3644              // Use 'postCreateCommand' to run commands after the container is created.
3645              // "postCreateCommand": "rustc --version",
3646
3647              // Configure tool-specific properties.
3648              // "customizations": {},
3649
3650              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3651              // "remoteUser": "root"
3652            }
3653            "#;
3654        let (test_dependencies, mut devcontainer_manifest) =
3655            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3656                .await
3657                .unwrap();
3658
3659        test_dependencies
3660            .fs
3661            .atomic_write(
3662                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3663                r#"
3664version: '3.8'
3665
3666volumes:
3667    postgres-data:
3668
3669services:
3670    app:
3671        build:
3672            context: .
3673            dockerfile: Dockerfile
3674        env_file:
3675            # Ensure that the variables in .env match the same variables in devcontainer.json
3676            - .env
3677
3678        volumes:
3679            - ../..:/workspaces:cached
3680
3681        # Overrides default command so things don't shut down after the process ends.
3682        command: sleep infinity
3683
3684        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3685        network_mode: service:db
3686
3687        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3688        # (Adding the "ports" property to this file will not forward from a Codespace.)
3689
3690    db:
3691        image: postgres:14.1
3692        restart: unless-stopped
3693        volumes:
3694            - postgres-data:/var/lib/postgresql/data
3695        env_file:
3696            # Ensure that the variables in .env match the same variables in devcontainer.json
3697            - .env
3698
3699        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3700        # (Adding the "ports" property to this file will not forward from a Codespace.)
3701                    "#.trim().to_string(),
3702            )
3703            .await
3704            .unwrap();
3705
3706        test_dependencies.fs.atomic_write(
3707            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3708            r#"
3709FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3710
3711# Include lld linker to improve build times either by using environment variable
3712# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3713RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3714    && apt-get -y install clang lld \
3715    && apt-get autoremove -y && apt-get clean -y
3716            "#.trim().to_string()).await.unwrap();
3717
3718        devcontainer_manifest.parse_nonremote_vars().unwrap();
3719
3720        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3721
3722        let files = test_dependencies.fs.files();
3723        let feature_dockerfile = files
3724            .iter()
3725            .find(|f| {
3726                f.file_name()
3727                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3728            })
3729            .expect("to be found");
3730        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3731        assert_eq!(
3732            &feature_dockerfile,
3733            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3734
3735FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3736
3737# Include lld linker to improve build times either by using environment variable
3738# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3739RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3740    && apt-get -y install clang lld \
3741    && apt-get autoremove -y && apt-get clean -y
3742
3743FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3744USER root
3745COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3746RUN chmod -R 0755 /tmp/build-features/
3747
3748FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3749
3750USER root
3751
3752RUN mkdir -p /tmp/dev-container-features
3753COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3754
3755RUN \
3756echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3757echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3758
3759
3760RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3761cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3762&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3763&& cd /tmp/dev-container-features/aws-cli_0 \
3764&& chmod +x ./devcontainer-features-install.sh \
3765&& ./devcontainer-features-install.sh \
3766&& rm -rf /tmp/dev-container-features/aws-cli_0
3767
3768RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3769cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3770&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3771&& cd /tmp/dev-container-features/docker-in-docker_1 \
3772&& chmod +x ./devcontainer-features-install.sh \
3773&& ./devcontainer-features-install.sh \
3774&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3775
3776
3777ARG _DEV_CONTAINERS_IMAGE_USER=root
3778USER $_DEV_CONTAINERS_IMAGE_USER
3779"#
3780        );
3781
3782        let uid_dockerfile = files
3783            .iter()
3784            .find(|f| {
3785                f.file_name()
3786                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3787            })
3788            .expect("to be found");
3789        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3790
3791        assert_eq!(
3792            &uid_dockerfile,
3793            r#"ARG BASE_IMAGE
3794FROM $BASE_IMAGE
3795
3796USER root
3797
3798ARG REMOTE_USER
3799ARG NEW_UID
3800ARG NEW_GID
3801SHELL ["/bin/sh", "-c"]
3802RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3803	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3804	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3805	if [ -z "$OLD_UID" ]; then \
3806		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3807	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3808		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3809	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3810		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3811	else \
3812		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3813			FREE_GID=65532; \
3814			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3815			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3816			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3817		fi; \
3818		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3819		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3820		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3821			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3822		fi; \
3823		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3824	fi;
3825
3826ARG IMAGE_USER
3827USER $IMAGE_USER
3828
3829# Ensure that /etc/profile does not clobber the existing path
3830RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3831
3832
3833ENV DOCKER_BUILDKIT=1
3834"#
3835        );
3836
3837        let build_override = files
3838            .iter()
3839            .find(|f| {
3840                f.file_name()
3841                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
3842            })
3843            .expect("to be found");
3844        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
3845        let build_config: DockerComposeConfig =
3846            serde_json_lenient::from_str(&build_override).unwrap();
3847        let build_context = build_config
3848            .services
3849            .get("app")
3850            .and_then(|s| s.build.as_ref())
3851            .and_then(|b| b.context.clone())
3852            .expect("build override should have a context");
3853        assert_eq!(
3854            build_context, ".",
3855            "build override should preserve the original build context from docker-compose.yml"
3856        );
3857
3858        let runtime_override = files
3859            .iter()
3860            .find(|f| {
3861                f.file_name()
3862                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
3863            })
3864            .expect("to be found");
3865        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();
3866
3867        let expected_runtime_override = DockerComposeConfig {
3868            name: None,
3869            services: HashMap::from([
3870                (
3871                    "app".to_string(),
3872                    DockerComposeService {
3873                        entrypoint: Some(vec![
3874                            "/bin/sh".to_string(),
3875                            "-c".to_string(),
3876                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3877                            "-".to_string(),
3878                        ]),
3879                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
3880                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
3881                        privileged: Some(true),
3882                        labels: Some(HashMap::from([
3883                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
3884                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
3885                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
3886                        ])),
3887                        volumes: vec![
3888                            MountDefinition {
3889                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
3890                                target: "/var/lib/docker".to_string(),
3891                                mount_type: Some("volume".to_string())
3892                            }
3893                        ],
3894                        ..Default::default()
3895                    },
3896                ),
3897                (
3898                    "db".to_string(),
3899                    DockerComposeService {
3900                        ports: vec![
3901                            DockerComposeServicePort {
3902                                target: "8083".to_string(),
3903                                published: "8083".to_string(),
3904                                ..Default::default()
3905                            },
3906                            DockerComposeServicePort {
3907                                target: "5432".to_string(),
3908                                published: "5432".to_string(),
3909                                ..Default::default()
3910                            },
3911                            DockerComposeServicePort {
3912                                target: "1234".to_string(),
3913                                published: "1234".to_string(),
3914                                ..Default::default()
3915                            },
3916                        ],
3917                        ..Default::default()
3918                    },
3919                ),
3920            ]),
3921            volumes: HashMap::from([(
3922                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
3923                DockerComposeVolume {
3924                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
3925                },
3926            )]),
3927        };
3928
3929        assert_eq!(
3930            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
3931            expected_runtime_override
3932        )
3933    }
3934
3935    #[test]
3936    fn test_resolve_compose_dockerfile() {
3937        let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3938
3939        // Bug case (#53473): context ".." with relative dockerfile
3940        assert_eq!(
3941            resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3942            Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3943        );
3944
3945        // Compose path containing ".." (as docker_compose_manifest() produces)
3946        assert_eq!(
3947            resolve_compose_dockerfile(
3948                Path::new("/project/.devcontainer/../docker-compose.yml"),
3949                Some("."),
3950                "docker/Dockerfile",
3951            ),
3952            Some(PathBuf::from("/project/docker/Dockerfile")),
3953        );
3954
3955        // Absolute dockerfile returned as-is
3956        assert_eq!(
3957            resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3958            Some(PathBuf::from("/absolute/Dockerfile")),
3959        );
3960
3961        // Absolute context used directly
3962        assert_eq!(
3963            resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3964            Some(PathBuf::from("/abs/context/Dockerfile")),
3965        );
3966
3967        // No context defaults to compose file's directory
3968        assert_eq!(
3969            resolve_compose_dockerfile(compose, None, "Dockerfile"),
3970            Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3971        );
3972    }
3973
3974    #[gpui::test]
3975    async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
3976        cx.executor().allow_parking();
3977        env_logger::try_init().ok();
3978
3979        let given_devcontainer_contents = r#"
3980            {
3981              "name": "Test",
3982              "dockerComposeFile": "docker-compose-context-parent.yml",
3983              "service": "app",
3984              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
3985            }
3986            "#;
3987        let (_, mut devcontainer_manifest) =
3988            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3989                .await
3990                .unwrap();
3991
3992        devcontainer_manifest.parse_nonremote_vars().unwrap();
3993
3994        let expected = PathBuf::from(TEST_PROJECT_PATH)
3995            .join(".devcontainer")
3996            .join("Dockerfile");
3997        assert_eq!(
3998            devcontainer_manifest.dockerfile_location().await,
3999            Some(expected)
4000        );
4001    }
4002
    /// End-to-end `build_and_run` of a docker-compose based devcontainer config
    /// with `"updateRemoteUserUID": false` and two features (aws-cli,
    /// docker-in-docker). Verifies the generated feature-install Dockerfile
    /// (`Dockerfile.extended`) written to the fake filesystem; note it does not
    /// assert anything about a `updateUID.Dockerfile`.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          "forwardPorts": [
            8083,
            "db:5432",
            "db:1234",
          ],
          "updateRemoteUserUID": false,
          "appPort": "8084",

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Seed the fake fs with the docker-compose.yml referenced by the config
        // ("app" builds from a local Dockerfile, "db" uses a stock image).
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
    build:
        context: .
        dockerfile: Dockerfile
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    volumes:
        - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
        - postgres-data:/var/lib/postgresql/data
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Base Dockerfile the "app" service builds from; its contents must be
        // carried verbatim into the generated Dockerfile.extended below.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Substitute non-remote variables (e.g. ${localWorkspaceFolderBasename})
        // before building/running the container.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Golden-file check: the generated Dockerfile.extended appends the
        // feature-install stages (one RUN --mount per feature) after the user's
        // base Dockerfile.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4178
    /// Docker-compose devcontainer flow with the fake Docker reporting podman.
    /// The golden `Dockerfile.extended` here uses a separate
    /// `dev_containers_feature_content_source` stage plus plain `COPY` commands
    /// for each feature (instead of BuildKit `RUN --mount` bind mounts) and ends
    /// right after the image-user reset; a `updateUID.Dockerfile` is also
    /// generated and checked.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          // "forwardPorts": [5432],

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        // Explicit dependency setup (rather than the default helper) so the fake
        // docker client can be flagged as podman before the manifest is built.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Seed the fake fs with the docker-compose.yml referenced by the config.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Base Dockerfile for the "app" service; carried verbatim into the
        // generated Dockerfile.extended below.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Substitute non-remote variables before building/running the container.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // Golden-file check: podman variant copies feature payloads via COPY
        // stages (no RUN --mount) and omits the profile/DOCKER_BUILDKIT tail.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Golden-file check: the UID-rewrite Dockerfile is still generated in
        // the podman flow (this config does not set updateRemoteUserUID).
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4404
4405    #[gpui::test]
4406    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4407        cx.executor().allow_parking();
4408        env_logger::try_init().ok();
4409        let given_devcontainer_contents = r#"
4410            /*---------------------------------------------------------------------------------------------
4411             *  Copyright (c) Microsoft Corporation. All rights reserved.
4412             *  Licensed under the MIT License. See License.txt in the project root for license information.
4413             *--------------------------------------------------------------------------------------------*/
4414            {
4415              "name": "cli-${devcontainerId}",
4416              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4417              "build": {
4418                "dockerfile": "Dockerfile",
4419                "args": {
4420                  "VARIANT": "18-bookworm",
4421                  "FOO": "bar",
4422                },
4423                "target": "development",
4424              },
4425              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4426              "workspaceFolder": "/workspace2",
4427              "mounts": [
4428                // Keep command history across instances
4429                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4430              ],
4431
4432              "forwardPorts": [
4433                8082,
4434                8083,
4435              ],
4436              "appPort": "8084",
4437              "updateRemoteUserUID": false,
4438
4439              "containerEnv": {
4440                "VARIABLE_VALUE": "value",
4441              },
4442
4443              "initializeCommand": "touch IAM.md",
4444
4445              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4446
4447              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4448
4449              "postCreateCommand": {
4450                "yarn": "yarn install",
4451                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4452              },
4453
4454              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4455
4456              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4457
4458              "remoteUser": "node",
4459
4460              "remoteEnv": {
4461                "PATH": "${containerEnv:PATH}:/some/other/path",
4462                "OTHER_ENV": "other_env_value"
4463              },
4464
4465              "features": {
4466                "ghcr.io/devcontainers/features/docker-in-docker:2": {
4467                  "moby": false,
4468                },
4469                "ghcr.io/devcontainers/features/go:1": {},
4470              },
4471
4472              "customizations": {
4473                "vscode": {
4474                  "extensions": [
4475                    "dbaeumer.vscode-eslint",
4476                    "GitHub.vscode-pull-request-github",
4477                  ],
4478                },
4479                "zed": {
4480                  "extensions": ["vue", "ruby"],
4481                },
4482                "codespaces": {
4483                  "repositories": {
4484                    "devcontainers/features": {
4485                      "permissions": {
4486                        "contents": "write",
4487                        "workflows": "write",
4488                      },
4489                    },
4490                  },
4491                },
4492              },
4493            }
4494            "#;
4495
4496        let (test_dependencies, mut devcontainer_manifest) =
4497            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4498                .await
4499                .unwrap();
4500
4501        test_dependencies
4502            .fs
4503            .atomic_write(
4504                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4505                r#"
4506#  Copyright (c) Microsoft Corporation. All rights reserved.
4507#  Licensed under the MIT License. See License.txt in the project root for license information.
4508ARG VARIANT="16-bullseye"
4509FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4510FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4511
4512RUN mkdir -p /workspaces && chown node:node /workspaces
4513
4514ARG USERNAME=node
4515USER $USERNAME
4516
4517# Save command line history
4518RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4519&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4520&& mkdir -p /home/$USERNAME/commandhistory \
4521&& touch /home/$USERNAME/commandhistory/.bash_history \
4522&& chown -R $USERNAME /home/$USERNAME/commandhistory
4523                    "#.trim().to_string(),
4524            )
4525            .await
4526            .unwrap();
4527
4528        devcontainer_manifest.parse_nonremote_vars().unwrap();
4529
4530        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4531
4532        assert_eq!(
4533            devcontainer_up.extension_ids,
4534            vec!["vue".to_string(), "ruby".to_string()]
4535        );
4536
4537        let files = test_dependencies.fs.files();
4538        let feature_dockerfile = files
4539            .iter()
4540            .find(|f| {
4541                f.file_name()
4542                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4543            })
4544            .expect("to be found");
4545        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4546        assert_eq!(
4547            &feature_dockerfile,
4548            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4549
4550#  Copyright (c) Microsoft Corporation. All rights reserved.
4551#  Licensed under the MIT License. See License.txt in the project root for license information.
4552ARG VARIANT="16-bullseye"
4553FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4554FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4555
4556RUN mkdir -p /workspaces && chown node:node /workspaces
4557
4558ARG USERNAME=node
4559USER $USERNAME
4560
4561# Save command line history
4562RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4563&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4564&& mkdir -p /home/$USERNAME/commandhistory \
4565&& touch /home/$USERNAME/commandhistory/.bash_history \
4566&& chown -R $USERNAME /home/$USERNAME/commandhistory
4567FROM development AS dev_container_auto_added_stage_label
4568
4569FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4570USER root
4571COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4572RUN chmod -R 0755 /tmp/build-features/
4573
4574FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4575
4576USER root
4577
4578RUN mkdir -p /tmp/dev-container-features
4579COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4580
4581RUN \
4582echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4583echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4584
4585
4586RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4587cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4588&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4589&& cd /tmp/dev-container-features/docker-in-docker_0 \
4590&& chmod +x ./devcontainer-features-install.sh \
4591&& ./devcontainer-features-install.sh \
4592&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4593
4594RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4595cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4596&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4597&& cd /tmp/dev-container-features/go_1 \
4598&& chmod +x ./devcontainer-features-install.sh \
4599&& ./devcontainer-features-install.sh \
4600&& rm -rf /tmp/dev-container-features/go_1
4601
4602
4603ARG _DEV_CONTAINERS_IMAGE_USER=root
4604USER $_DEV_CONTAINERS_IMAGE_USER
4605
4606# Ensure that /etc/profile does not clobber the existing path
4607RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4608
4609ENV DOCKER_BUILDKIT=1
4610
4611ENV GOPATH=/go
4612ENV GOROOT=/usr/local/go
4613ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4614ENV VARIABLE_VALUE=value
4615"#
4616        );
4617
4618        let golang_install_wrapper = files
4619            .iter()
4620            .find(|f| {
4621                f.file_name()
4622                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4623                    && f.to_str().is_some_and(|s| s.contains("go_"))
4624            })
4625            .expect("to be found");
4626        let golang_install_wrapper = test_dependencies
4627            .fs
4628            .load(golang_install_wrapper)
4629            .await
4630            .unwrap();
4631        assert_eq!(
4632            &golang_install_wrapper,
4633            r#"#!/bin/sh
4634set -e
4635
4636on_exit () {
4637    [ $? -eq 0 ] && exit
4638    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4639}
4640
4641trap on_exit EXIT
4642
4643echo ===========================================================================
4644echo 'Feature       : go'
4645echo 'Id            : ghcr.io/devcontainers/features/go:1'
4646echo 'Options       :'
4647echo '    GOLANGCILINTVERSION=latest
4648    VERSION=latest'
4649echo ===========================================================================
4650
4651set -a
4652. ../devcontainer-features.builtin.env
4653. ./devcontainer-features.env
4654set +a
4655
4656chmod +x ./install.sh
4657./install.sh
4658"#
4659        );
4660
4661        let docker_commands = test_dependencies
4662            .command_runner
4663            .commands_by_program("docker");
4664
4665        let docker_run_command = docker_commands
4666            .iter()
4667            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4668
4669        assert!(docker_run_command.is_some());
4670
4671        let docker_exec_commands = test_dependencies
4672            .docker
4673            .exec_commands_recorded
4674            .lock()
4675            .unwrap();
4676
4677        assert!(docker_exec_commands.iter().all(|exec| {
4678            exec.env
4679                == HashMap::from([
4680                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4681                    (
4682                        "PATH".to_string(),
4683                        "/initial/path:/some/other/path".to_string(),
4684                    ),
4685                ])
4686        }))
4687    }
4688
    /// A devcontainer.json that specifies only a plain `image` (no Dockerfile,
    /// no compose file) should still build and run, writing the
    /// `updateUID.Dockerfile` whose embedded shell script rewrites the remote
    /// user's UID/GID entries in /etc/passwd and /etc/group (non-Windows path).
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Minimal configuration: a name containing a ${devcontainerId} variable
        // plus a plain image reference.
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "image": "test_image:latest",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Resolve non-remote variables in the config before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // build_and_run() should have written the UID-update Dockerfile to the
        // fake filesystem; locate it by file name.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // The generated file must match byte-for-byte. The script reassigns a
        // conflicting GID to a free one before rewriting passwd/group, and
        // chowns the home folder to the new UID:GID.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4763
4764    #[cfg(target_os = "windows")]
4765    #[gpui::test]
4766    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4767        cx.executor().allow_parking();
4768        env_logger::try_init().ok();
4769        let given_devcontainer_contents = r#"
4770            {
4771              "name": "cli-${devcontainerId}",
4772              "image": "test_image:latest",
4773            }
4774            "#;
4775
4776        let (_, mut devcontainer_manifest) =
4777            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4778                .await
4779                .unwrap();
4780
4781        devcontainer_manifest.parse_nonremote_vars().unwrap();
4782
4783        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4784
4785        assert_eq!(
4786            devcontainer_up.remote_workspace_folder,
4787            "/workspaces/project"
4788        );
4789    }
4790
    /// A compose-based devcontainer whose service uses a plain `image` should
    /// build and run, producing the same `updateUID.Dockerfile` as the
    /// image-only path (non-Windows).
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Config points at a compose file and names the service to attach to.
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "dockerComposeFile": "docker-compose-plain.yml",
              "service": "app",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the referenced compose file into the fake filesystem: one
        // service with a plain image and a bind volume.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
    app:
        image: test_image:latest
        command: sleep infinity
        volumes:
            - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        // Resolve non-remote variables in the config before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // build_and_run() should have written the UID-update Dockerfile.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Must match byte-for-byte the same generated file as in the
        // plain-image (non-compose) test above.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4884
4885    #[gpui::test]
4886    async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4887        cx.executor().allow_parking();
4888        env_logger::try_init().ok();
4889        let given_devcontainer_contents = r#"
4890            {
4891              "name": "cli-${devcontainerId}",
4892              "build": {
4893                "dockerfile": "Dockerfile",
4894                "args": {
4895                    "VERSION": "1.22",
4896                }
4897              },
4898            }
4899            "#;
4900
4901        let (test_dependencies, mut devcontainer_manifest) =
4902            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4903                .await
4904                .unwrap();
4905
4906        test_dependencies
4907            .fs
4908            .atomic_write(
4909                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4910                r#"
4911FROM dontgrabme as build_context
4912ARG VERSION=1.21
4913ARG REPOSITORY=mybuild
4914ARG REGISTRY=docker.io/stuff
4915
4916ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4917
4918FROM ${IMAGE} AS devcontainer
4919                    "#
4920                .trim()
4921                .to_string(),
4922            )
4923            .await
4924            .unwrap();
4925
4926        devcontainer_manifest.parse_nonremote_vars().unwrap();
4927
4928        let dockerfile_contents = devcontainer_manifest
4929            .expanded_dockerfile_content()
4930            .await
4931            .unwrap();
4932        let base_image = image_from_dockerfile(
4933            dockerfile_contents,
4934            &devcontainer_manifest
4935                .dev_container()
4936                .build
4937                .as_ref()
4938                .and_then(|b| b.target.clone()),
4939        )
4940        .unwrap();
4941
4942        assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4943    }
4944
4945    #[gpui::test]
4946    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
4947        cx.executor().allow_parking();
4948        env_logger::try_init().ok();
4949        let given_devcontainer_contents = r#"
4950            {
4951              "name": "cli-${devcontainerId}",
4952              "build": {
4953                "dockerfile": "Dockerfile",
4954                "args": {
4955                    "VERSION": "1.22",
4956                },
4957                "target": "development"
4958              },
4959            }
4960            "#;
4961
4962        let (test_dependencies, mut devcontainer_manifest) =
4963            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4964                .await
4965                .unwrap();
4966
4967        test_dependencies
4968            .fs
4969            .atomic_write(
4970                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4971                r#"
4972FROM dontgrabme as build_context
4973ARG VERSION=1.21
4974ARG REPOSITORY=mybuild
4975ARG REGISTRY=docker.io/stuff
4976
4977ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4978ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest
4979
4980FROM ${DEV_IMAGE} AS development
4981FROM ${IMAGE} AS production
4982                    "#
4983                .trim()
4984                .to_string(),
4985            )
4986            .await
4987            .unwrap();
4988
4989        devcontainer_manifest.parse_nonremote_vars().unwrap();
4990
4991        let dockerfile_contents = devcontainer_manifest
4992            .expanded_dockerfile_content()
4993            .await
4994            .unwrap();
4995        let base_image = image_from_dockerfile(
4996            dockerfile_contents,
4997            &devcontainer_manifest
4998                .dev_container()
4999                .build
5000                .as_ref()
5001                .and_then(|b| b.target.clone()),
5002        )
5003        .unwrap();
5004
5005        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
5006    }
5007
    /// ARG expansion rules inside a Dockerfile: devcontainer.json build args
    /// override Dockerfile defaults (ELIXIR_VERSION 1.21 beats 1.20.0-rc.4),
    /// multiple ARGs on one line and nested ${...} compositions expand, and a
    /// forward reference to a later ARG is left unexpanded.
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile exercising: a forward reference (OTP_VERSION used before
        // it is declared), two ARGs on one line, nested brace values containing
        // JSON-ish text, and an ARG fed from the devcontainer.json args map.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                    "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Note: INVALID_FORWARD_REFERENCE keeps its unexpanded ${OTP_VERSION},
        // while every backward reference (including the FROM line) is expanded.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
            "#
            .trim()
        )
    }
5080
5081    #[gpui::test]
5082    async fn test_expands_compose_service_args_in_dockerfile(cx: &mut TestAppContext) {
5083        cx.executor().allow_parking();
5084        env_logger::try_init().ok();
5085
5086        let given_devcontainer_contents = r#"
5087            {
5088              "dockerComposeFile": "docker-compose-with-args.yml",
5089              "service": "app",
5090            }
5091            "#;
5092
5093        let (test_dependencies, mut devcontainer_manifest) =
5094            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5095                .await
5096                .unwrap();
5097
5098        test_dependencies
5099            .fs
5100            .atomic_write(
5101                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
5102                "FROM ${BASE_IMAGE}\nUSER root\n".to_string(),
5103            )
5104            .await
5105            .unwrap();
5106
5107        devcontainer_manifest.parse_nonremote_vars().unwrap();
5108
5109        let expanded = devcontainer_manifest
5110            .expanded_dockerfile_content()
5111            .await
5112            .unwrap();
5113
5114        assert_eq!(expanded, "FROM test_image:latest\nUSER root");
5115
5116        let base_image =
5117            image_from_dockerfile(expanded, &None).expect("base image resolves from compose args");
5118        assert_eq!(base_image, "test_image:latest");
5119    }
5120
    // NOTE(review): empty stub — passes vacuously. TODO: implement or mark #[ignore].
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
5123
    // NOTE(review): empty stub — passes vacuously. TODO: implement or mark #[ignore].
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
5126
    // NOTE(review): empty stub — passes vacuously. TODO: implement or mark #[ignore].
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
5129
    /// One exec invocation captured by `FakeDocker` so tests can assert on how
    /// the container was exec'd into. Underscore-prefixed fields are recorded
    /// but not currently inspected by any test in view.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment handed to the exec'd process; tests assert its contents.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
5137
    /// In-memory test double for the Docker client: serves canned responses and
    /// records exec commands for later assertions.
    pub(crate) struct FakeDocker {
        // Exec invocations recorded for assertion by tests.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // Toggled via set_podman(); presumably switches podman-specific
        // behavior — consumers are outside this chunk, TODO confirm.
        podman: bool,
        // NOTE(review): looks like this advertises `docker buildx` support to
        // the code under test — confirm against the DockerClient trait.
        has_buildx: bool,
    }
5143
5144    impl FakeDocker {
5145        pub(crate) fn new() -> Self {
5146            Self {
5147                podman: false,
5148                has_buildx: true,
5149                exec_commands_recorded: Mutex::new(Vec::new()),
5150            }
5151        }
5152        #[cfg(not(target_os = "windows"))]
5153        fn set_podman(&mut self, podman: bool) {
5154            self.podman = podman;
5155        }
5156    }
5157
5158    #[async_trait]
5159    impl DockerClient for FakeDocker {
5160        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
5161            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
5162                return Ok(DockerInspect {
5163                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5164                        .to_string(),
5165                    config: DockerInspectConfig {
5166                        labels: DockerConfigLabels {
5167                            metadata: Some(vec![HashMap::from([(
5168                                "remoteUser".to_string(),
5169                                Value::String("node".to_string()),
5170                            )])]),
5171                        },
5172                        env: Vec::new(),
5173                        image_user: Some("root".to_string()),
5174                    },
5175                    mounts: None,
5176                    state: None,
5177                });
5178            }
5179            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
5180                return Ok(DockerInspect {
5181                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
5182                        .to_string(),
5183                    config: DockerInspectConfig {
5184                        labels: DockerConfigLabels {
5185                            metadata: Some(vec![HashMap::from([(
5186                                "remoteUser".to_string(),
5187                                Value::String("vscode".to_string()),
5188                            )])]),
5189                        },
5190                        image_user: Some("root".to_string()),
5191                        env: Vec::new(),
5192                    },
5193                    mounts: None,
5194                    state: None,
5195                });
5196            }
5197            if id.starts_with("cli_") {
5198                return Ok(DockerInspect {
5199                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
5200                        .to_string(),
5201                    config: DockerInspectConfig {
5202                        labels: DockerConfigLabels {
5203                            metadata: Some(vec![HashMap::from([(
5204                                "remoteUser".to_string(),
5205                                Value::String("node".to_string()),
5206                            )])]),
5207                        },
5208                        image_user: Some("root".to_string()),
5209                        env: vec!["PATH=/initial/path".to_string()],
5210                    },
5211                    mounts: None,
5212                    state: None,
5213                });
5214            }
5215            if id == "found_docker_ps" {
5216                return Ok(DockerInspect {
5217                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
5218                        .to_string(),
5219                    config: DockerInspectConfig {
5220                        labels: DockerConfigLabels {
5221                            metadata: Some(vec![HashMap::from([(
5222                                "remoteUser".to_string(),
5223                                Value::String("node".to_string()),
5224                            )])]),
5225                        },
5226                        image_user: Some("root".to_string()),
5227                        env: vec!["PATH=/initial/path".to_string()],
5228                    },
5229                    mounts: Some(vec![DockerInspectMount {
5230                        source: "/path/to/local/project".to_string(),
5231                        destination: "/workspaces/project".to_string(),
5232                    }]),
5233                    state: None,
5234                });
5235            }
5236            if id.starts_with("rust_a-") {
5237                return Ok(DockerInspect {
5238                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
5239                        .to_string(),
5240                    config: DockerInspectConfig {
5241                        labels: DockerConfigLabels {
5242                            metadata: Some(vec![HashMap::from([(
5243                                "remoteUser".to_string(),
5244                                Value::String("vscode".to_string()),
5245                            )])]),
5246                        },
5247                        image_user: Some("root".to_string()),
5248                        env: Vec::new(),
5249                    },
5250                    mounts: None,
5251                    state: None,
5252                });
5253            }
5254            if id == "test_image:latest" {
5255                return Ok(DockerInspect {
5256                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5257                        .to_string(),
5258                    config: DockerInspectConfig {
5259                        labels: DockerConfigLabels {
5260                            metadata: Some(vec![HashMap::from([(
5261                                "remoteUser".to_string(),
5262                                Value::String("node".to_string()),
5263                            )])]),
5264                        },
5265                        env: Vec::new(),
5266                        image_user: Some("root".to_string()),
5267                    },
5268                    mounts: None,
5269                    state: None,
5270                });
5271            }
5272
5273            Err(DevContainerError::DockerNotAvailable)
5274        }
5275        async fn get_docker_compose_config(
5276            &self,
5277            config_files: &Vec<PathBuf>,
5278        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
5279            let project_path = PathBuf::from(TEST_PROJECT_PATH);
5280            if config_files.len() == 1
5281                && config_files.get(0)
5282                    == Some(
5283                        &project_path
5284                            .join(".devcontainer")
5285                            .join("docker-compose.yml"),
5286                    )
5287            {
5288                return Ok(Some(DockerComposeConfig {
5289                    name: None,
5290                    services: HashMap::from([
5291                        (
5292                            "app".to_string(),
5293                            DockerComposeService {
5294                                build: Some(DockerComposeServiceBuild {
5295                                    context: Some(".".to_string()),
5296                                    dockerfile: Some("Dockerfile".to_string()),
5297                                    args: None,
5298                                    additional_contexts: None,
5299                                    target: None,
5300                                }),
5301                                volumes: vec![MountDefinition {
5302                                    source: Some("../..".to_string()),
5303                                    target: "/workspaces".to_string(),
5304                                    mount_type: Some("bind".to_string()),
5305                                }],
5306                                network_mode: Some("service:db".to_string()),
5307                                ..Default::default()
5308                            },
5309                        ),
5310                        (
5311                            "db".to_string(),
5312                            DockerComposeService {
5313                                image: Some("postgres:14.1".to_string()),
5314                                volumes: vec![MountDefinition {
5315                                    source: Some("postgres-data".to_string()),
5316                                    target: "/var/lib/postgresql/data".to_string(),
5317                                    mount_type: Some("volume".to_string()),
5318                                }],
5319                                env_file: Some(vec![".env".to_string()]),
5320                                ..Default::default()
5321                            },
5322                        ),
5323                    ]),
5324                    volumes: HashMap::from([(
5325                        "postgres-data".to_string(),
5326                        DockerComposeVolume::default(),
5327                    )]),
5328                }));
5329            }
5330            if config_files.len() == 1
5331                && config_files.get(0)
5332                    == Some(
5333                        &project_path
5334                            .join(".devcontainer")
5335                            .join("docker-compose-context-parent.yml"),
5336                    )
5337            {
5338                return Ok(Some(DockerComposeConfig {
5339                    name: None,
5340                    services: HashMap::from([(
5341                        "app".to_string(),
5342                        DockerComposeService {
5343                            build: Some(DockerComposeServiceBuild {
5344                                context: Some("..".to_string()),
5345                                dockerfile: Some(
5346                                    PathBuf::from(".devcontainer")
5347                                        .join("Dockerfile")
5348                                        .display()
5349                                        .to_string(),
5350                                ),
5351                                args: None,
5352                                additional_contexts: None,
5353                                target: None,
5354                            }),
5355                            ..Default::default()
5356                        },
5357                    )]),
5358                    volumes: HashMap::new(),
5359                }));
5360            }
5361            if config_files.len() == 1
5362                && config_files.get(0)
5363                    == Some(
5364                        &project_path
5365                            .join(".devcontainer")
5366                            .join("docker-compose-with-args.yml"),
5367                    )
5368            {
5369                return Ok(Some(DockerComposeConfig {
5370                    name: None,
5371                    services: HashMap::from([(
5372                        "app".to_string(),
5373                        DockerComposeService {
5374                            build: Some(DockerComposeServiceBuild {
5375                                context: Some(".".to_string()),
5376                                dockerfile: Some("Dockerfile".to_string()),
5377                                args: Some(HashMap::from([(
5378                                    "BASE_IMAGE".to_string(),
5379                                    "test_image:latest".to_string(),
5380                                )])),
5381                                additional_contexts: None,
5382                                target: None,
5383                            }),
5384                            ..Default::default()
5385                        },
5386                    )]),
5387                    ..Default::default()
5388                }));
5389            }
5390            if config_files.len() == 1
5391                && config_files.get(0)
5392                    == Some(
5393                        &project_path
5394                            .join(".devcontainer")
5395                            .join("docker-compose-plain.yml"),
5396                    )
5397            {
5398                return Ok(Some(DockerComposeConfig {
5399                    name: None,
5400                    services: HashMap::from([(
5401                        "app".to_string(),
5402                        DockerComposeService {
5403                            image: Some("test_image:latest".to_string()),
5404                            command: vec!["sleep".to_string(), "infinity".to_string()],
5405                            ..Default::default()
5406                        },
5407                    )]),
5408                    ..Default::default()
5409                }));
5410            }
5411            Err(DevContainerError::DockerNotAvailable)
5412        }
        /// Test stub: pretends `docker compose build` succeeded without
        /// spawning any external process. The `&Vec` parameter mirrors the
        /// trait signature and cannot be changed here.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
5420        async fn run_docker_exec(
5421            &self,
5422            container_id: &str,
5423            remote_folder: &str,
5424            user: &str,
5425            env: &HashMap<String, String>,
5426            inner_command: Command,
5427        ) -> Result<(), DevContainerError> {
5428            let mut record = self
5429                .exec_commands_recorded
5430                .lock()
5431                .expect("should be available");
5432            record.push(RecordedExecCommand {
5433                _container_id: container_id.to_string(),
5434                _remote_folder: remote_folder.to_string(),
5435                _user: user.to_string(),
5436                env: env.clone(),
5437                _inner_command: inner_command,
5438            });
5439            Ok(())
5440        }
        /// Test stub: always fails with `DockerNotAvailable`, letting tests
        /// exercise the error path for restarting an existing container.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
5444        async fn find_process_by_filters(
5445            &self,
5446            _filters: Vec<String>,
5447        ) -> Result<Option<DockerPs>, DevContainerError> {
5448            Ok(Some(DockerPs {
5449                id: "found_docker_ps".to_string(),
5450            }))
5451        }
5452        fn supports_compose_buildkit(&self) -> bool {
5453            !self.podman && self.has_buildx
5454        }
5455        fn docker_cli(&self) -> String {
5456            if self.podman {
5457                "podman".to_string()
5458            } else {
5459                "docker".to_string()
5460            }
5461        }
5462    }
5463
    /// One command invocation captured by [`TestCommandRunner`].
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        // Program name exactly as handed to the runner (e.g. "docker").
        pub(crate) program: String,
        // Arguments in the order they were supplied.
        pub(crate) args: Vec<String>,
    }
5469
    /// A `CommandRunner` test double that records every command it is asked
    /// to run instead of spawning real processes.
    pub(crate) struct TestCommandRunner {
        // Mutex because `run_command` only gets `&self` but must append.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5473
5474    impl TestCommandRunner {
5475        fn new() -> Self {
5476            Self {
5477                commands_recorded: Mutex::new(Vec::new()),
5478            }
5479        }
5480
5481        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5482            let record = self.commands_recorded.lock().expect("poisoned");
5483            record
5484                .iter()
5485                .filter(|r| r.program == program)
5486                .map(|r| r.clone())
5487                .collect()
5488        }
5489    }
5490
5491    #[async_trait]
5492    impl CommandRunner for TestCommandRunner {
5493        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5494            let mut record = self.commands_recorded.lock().expect("poisoned");
5495
5496            record.push(TestCommand {
5497                program: command.get_program().display().to_string(),
5498                args: command
5499                    .get_args()
5500                    .map(|a| a.display().to_string())
5501                    .collect(),
5502            });
5503
5504            Ok(Output {
5505                status: ExitStatus::default(),
5506                stdout: vec![],
5507                stderr: vec![],
5508            })
5509        }
5510    }
5511
5512    fn fake_http_client() -> Arc<dyn HttpClient> {
5513        FakeHttpClient::create(|request| async move {
5514            let (parts, _body) = request.into_parts();
5515            if parts.uri.path() == "/token" {
5516                let token_response = TokenResponse {
5517                    token: "token".to_string(),
5518                };
5519                return Ok(http::Response::builder()
5520                    .status(200)
5521                    .body(http_client::AsyncBody::from(
5522                        serde_json_lenient::to_string(&token_response).unwrap(),
5523                    ))
5524                    .unwrap());
5525            }
5526
5527            // OCI specific things
5528            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5529                let response = r#"
5530                    {
5531                        "schemaVersion": 2,
5532                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
5533                        "config": {
5534                            "mediaType": "application/vnd.devcontainers",
5535                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5536                            "size": 2
5537                        },
5538                        "layers": [
5539                            {
5540                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5541                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5542                                "size": 59392,
5543                                "annotations": {
5544                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5545                                }
5546                            }
5547                        ],
5548                        "annotations": {
5549                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5550                            "com.github.package.type": "devcontainer_feature"
5551                        }
5552                    }
5553                    "#;
5554                return Ok(http::Response::builder()
5555                    .status(200)
5556                    .body(http_client::AsyncBody::from(response))
5557                    .unwrap());
5558            }
5559
5560            if parts.uri.path()
5561                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5562            {
5563                let response = build_tarball(vec![
5564                    ("./NOTES.md", r#"
5565                        ## Limitations
5566
5567                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5568                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5569                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5570                          ```
5571                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5572                          ```
5573                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5574
5575
5576                        ## OS Support
5577
5578                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5579
5580                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5581
5582                        `bash` is required to execute the `install.sh` script."#),
5583                    ("./README.md", r#"
5584                        # Docker (Docker-in-Docker) (docker-in-docker)
5585
5586                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5587
5588                        ## Example Usage
5589
5590                        ```json
5591                        "features": {
5592                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5593                        }
5594                        ```
5595
5596                        ## Options
5597
5598                        | Options Id | Description | Type | Default Value |
5599                        |-----|-----|-----|-----|
5600                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5601                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5602                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5603                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5604                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5605                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5606                        | installDockerBuildx | Install Docker Buildx | boolean | true |
5607                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5608                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5609
5610                        ## Customizations
5611
5612                        ### VS Code Extensions
5613
5614                        - `ms-azuretools.vscode-containers`
5615
5616                        ## Limitations
5617
5618                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5619                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5620                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5621                          ```
5622                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5623                          ```
5624                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5625
5626
5627                        ## OS Support
5628
5629                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5630
5631                        `bash` is required to execute the `install.sh` script.
5632
5633
5634                        ---
5635
5636                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
5637                    ("./devcontainer-feature.json", r#"
5638                        {
5639                          "id": "docker-in-docker",
5640                          "version": "2.16.1",
5641                          "name": "Docker (Docker-in-Docker)",
5642                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5643                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5644                          "options": {
5645                            "version": {
5646                              "type": "string",
5647                              "proposals": [
5648                                "latest",
5649                                "none",
5650                                "20.10"
5651                              ],
5652                              "default": "latest",
5653                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5654                            },
5655                            "moby": {
5656                              "type": "boolean",
5657                              "default": true,
5658                              "description": "Install OSS Moby build instead of Docker CE"
5659                            },
5660                            "mobyBuildxVersion": {
5661                              "type": "string",
5662                              "default": "latest",
5663                              "description": "Install a specific version of moby-buildx when using Moby"
5664                            },
5665                            "dockerDashComposeVersion": {
5666                              "type": "string",
5667                              "enum": [
5668                                "none",
5669                                "v1",
5670                                "v2"
5671                              ],
5672                              "default": "v2",
5673                              "description": "Default version of Docker Compose (v1, v2 or none)"
5674                            },
5675                            "azureDnsAutoDetection": {
5676                              "type": "boolean",
5677                              "default": true,
5678                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5679                            },
5680                            "dockerDefaultAddressPool": {
5681                              "type": "string",
5682                              "default": "",
5683                              "proposals": [],
5684                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5685                            },
5686                            "installDockerBuildx": {
5687                              "type": "boolean",
5688                              "default": true,
5689                              "description": "Install Docker Buildx"
5690                            },
5691                            "installDockerComposeSwitch": {
5692                              "type": "boolean",
5693                              "default": false,
5694                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5695                            },
5696                            "disableIp6tables": {
5697                              "type": "boolean",
5698                              "default": false,
5699                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5700                            }
5701                          },
5702                          "entrypoint": "/usr/local/share/docker-init.sh",
5703                          "privileged": true,
5704                          "containerEnv": {
5705                            "DOCKER_BUILDKIT": "1"
5706                          },
5707                          "customizations": {
5708                            "vscode": {
5709                              "extensions": [
5710                                "ms-azuretools.vscode-containers"
5711                              ],
5712                              "settings": {
5713                                "github.copilot.chat.codeGeneration.instructions": [
5714                                  {
5715                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5716                                  }
5717                                ]
5718                              }
5719                            }
5720                          },
5721                          "mounts": [
5722                            {
5723                              "source": "dind-var-lib-docker-${devcontainerId}",
5724                              "target": "/var/lib/docker",
5725                              "type": "volume"
5726                            }
5727                          ],
5728                          "installsAfter": [
5729                            "ghcr.io/devcontainers/features/common-utils"
5730                          ]
5731                        }"#),
5732                    ("./install.sh", r#"
5733                    #!/usr/bin/env bash
5734                    #-------------------------------------------------------------------------------------------------------------
5735                    # Copyright (c) Microsoft Corporation. All rights reserved.
5736                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5737                    #-------------------------------------------------------------------------------------------------------------
5738                    #
5739                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5740                    # Maintainer: The Dev Container spec maintainers
5741
5742
5743                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5744                    USE_MOBY="${MOBY:-"true"}"
5745                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5746                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5747                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5748                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5749                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5750                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5751                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5752                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5753                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5754                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5755                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5756                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5757
5758                    # Default: Exit on any failure.
5759                    set -e
5760
5761                    # Clean up
5762                    rm -rf /var/lib/apt/lists/*
5763
5764                    # Setup STDERR.
5765                    err() {
5766                        echo "(!) $*" >&2
5767                    }
5768
5769                    if [ "$(id -u)" -ne 0 ]; then
5770                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5771                        exit 1
5772                    fi
5773
5774                    ###################
5775                    # Helper Functions
5776                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5777                    ###################
5778
5779                    # Determine the appropriate non-root user
5780                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5781                        USERNAME=""
5782                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5783                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5784                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5785                                USERNAME=${CURRENT_USER}
5786                                break
5787                            fi
5788                        done
5789                        if [ "${USERNAME}" = "" ]; then
5790                            USERNAME=root
5791                        fi
5792                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5793                        USERNAME=root
5794                    fi
5795
5796                    # Package manager update function
5797                    pkg_mgr_update() {
5798                        case ${ADJUSTED_ID} in
5799                            debian)
5800                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5801                                    echo "Running apt-get update..."
5802                                    apt-get update -y
5803                                fi
5804                                ;;
5805                            rhel)
5806                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5807                                    cache_check_dir="/var/cache/yum"
5808                                else
5809                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5810                                fi
5811                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5812                                    echo "Running ${PKG_MGR_CMD} makecache ..."
5813                                    ${PKG_MGR_CMD} makecache
5814                                fi
5815                                ;;
5816                        esac
5817                    }
5818
5819                    # Checks if packages are installed and installs them if not
5820                    check_packages() {
5821                        case ${ADJUSTED_ID} in
5822                            debian)
5823                                if ! dpkg -s "$@" > /dev/null 2>&1; then
5824                                    pkg_mgr_update
5825                                    apt-get -y install --no-install-recommends "$@"
5826                                fi
5827                                ;;
5828                            rhel)
5829                                if ! rpm -q "$@" > /dev/null 2>&1; then
5830                                    pkg_mgr_update
5831                                    ${PKG_MGR_CMD} -y install "$@"
5832                                fi
5833                                ;;
5834                        esac
5835                    }
5836
5837                    # Figure out correct version of a three part version number is not passed
5838                    find_version_from_git_tags() {
5839                        local variable_name=$1
5840                        local requested_version=${!variable_name}
5841                        if [ "${requested_version}" = "none" ]; then return; fi
5842                        local repository=$2
5843                        local prefix=${3:-"tags/v"}
5844                        local separator=${4:-"."}
5845                        local last_part_optional=${5:-"false"}
5846                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5847                            local escaped_separator=${separator//./\\.}
5848                            local last_part
5849                            if [ "${last_part_optional}" = "true" ]; then
5850                                last_part="(${escaped_separator}[0-9]+)?"
5851                            else
5852                                last_part="${escaped_separator}[0-9]+"
5853                            fi
5854                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5855                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5856                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5857                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5858                            else
5859                                set +e
5860                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5861                                set -e
5862                            fi
5863                        fi
5864                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5865                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5866                            exit 1
5867                        fi
5868                        echo "${variable_name}=${!variable_name}"
5869                    }
5870
                    # Use semver logic to decrement a version number then look for the closest match
                    # Decrements the version stored in the global variable named by $1 (dropping
                    # one breakfix, minor, or major step depending on which parts are zero or
                    # missing), then resolves the closest published tag via
                    # find_version_from_git_tags and writes the result back with "declare -g"
                    # (a poor-man's nameref).
                    # Args: $1 name of the variable holding the current version (updated in place)
                    #       $2 git repository URL
                    #       $3 tag prefix (default "tags/v")
                    #       $4 version part separator (default ".")
                    #       $5 "true" when the last version part may be omitted (e.g. go)
                    #       $6 version suffix regex
                    # NOTE(review): $6 (version_suffix_regex) is assigned but never used in this
                    # function body — confirm whether it should be forwarded downstream.
                    find_prev_version_from_git_tags() {
                        local variable_name=$1
                        local current_version=${!variable_name}
                        local repository=$2
                        # Normally a "v" is used before the version number, but support alternate cases
                        local prefix=${3:-"tags/v"}
                        # Some repositories use "_" instead of "." for version number part separation, support that
                        local separator=${4:-"."}
                        # Some tools release versions that omit the last digit (e.g. go)
                        local last_part_optional=${5:-"false"}
                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
                        local version_suffix_regex=$6
                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                        # (Bash arithmetic ((...)) also returns a non-zero status when its result
                        # is 0, which would otherwise abort the script under "set -e".)
                        set +e
                            # Split "major.minor.breakfix"; each part defaults to "" when absent.
                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"

                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
                                ((major=major-1))
                                declare -g ${variable_name}="${major}"
                                # Look for latest version from previous major release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                declare -g ${variable_name}="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            else
                                ((breakfix=breakfix-1))
                                # When breakfix reaches 0 and the tool omits trailing zeros, drop the part entirely.
                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
                                    declare -g ${variable_name}="${major}.${minor}"
                                else
                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
                                fi
                            fi
                        set -e
                    }
5911
                    # Function to fetch the version released prior to the latest version
                    # Args: $1 git repository URL (used only for the git-tags fallback)
                    #       $2 GitHub API releases URL (see get_github_api_repo_url)
                    #       $3 name of a global variable; holds the latest version on entry and
                    #          the previous release version on exit (written via "declare -g").
                    get_previous_version() {
                        local url=$1
                        local repo_url=$2
                        local variable_name=$3
                        prev_version=${!variable_name}

                        # A JSON object response means the API call failed (e.g. rate limiting);
                        # a JSON array is the expected list of releases.
                        output=$(curl -s "$repo_url");
                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
                          message=$(echo "$output" | jq -r '.message')

                          if [[ $message == "API rate limit exceeded"* ]]; then
                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
                                echo -e "\nAttempting to find latest version using GitHub tags."
                                # Fall back to scanning git tags; updates the global prev_version.
                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
                                declare -g ${variable_name}="${prev_version}"
                           fi
                          # NOTE(review): any other error object (bad URL, different API error)
                          # falls through silently and leaves the variable unchanged — confirm
                          # this best-effort behavior is intended.
                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
                            echo -e "\nAttempting to find latest version using GitHub Api."
                            # .[0] is the latest release, so .[1] is the one released before it.
                            version=$(echo "$output" | jq -r '.[1].tag_name')
                            # Strip a leading "v" tag prefix if present.
                            declare -g ${variable_name}="${version#v}"
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }
5936
5937                    get_github_api_repo_url() {
5938                        local url=$1
5939                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5940                    }
5941
5942                    ###########################################
5943                    # Start docker-in-docker installation
5944                    ###########################################
5945
5946                    # Ensure apt is in non-interactive to avoid prompts
5947                    export DEBIAN_FRONTEND=noninteractive
5948
5949                    # Source /etc/os-release to get OS info
5950                    . /etc/os-release
5951
5952                    # Determine adjusted ID and package manager
5953                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5954                        ADJUSTED_ID="debian"
5955                        PKG_MGR_CMD="apt-get"
5956                        # Use dpkg for Debian-based systems
5957                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5958                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5959                        ADJUSTED_ID="rhel"
5960                        # Determine the appropriate package manager for RHEL-based systems
5961                        for pkg_mgr in tdnf dnf microdnf yum; do
5962                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
5963                                PKG_MGR_CMD="$pkg_mgr"
5964                                break
5965                            fi
5966                        done
5967
5968                        if [ -z "${PKG_MGR_CMD}" ]; then
5969                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5970                            exit 1
5971                        fi
5972
5973                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5974                    else
5975                        err "Linux distro ${ID} not supported."
5976                        exit 1
5977                    fi
5978
5979                    # Azure Linux specific setup
5980                    if [ "${ID}" = "azurelinux" ]; then
5981                        VERSION_CODENAME="azurelinux${VERSION_ID}"
5982                    fi
5983
5984                    # Prevent attempting to install Moby on Debian trixie (packages removed)
5985                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5986                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5987                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5988                        exit 1
5989                    fi
5990
5991                    # Check if distro is supported
5992                    if [ "${USE_MOBY}" = "true" ]; then
5993                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5994                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5995                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5996                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5997                                exit 1
5998                            fi
5999                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
6000                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
6001                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
6002                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
6003                            else
6004                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
6005                            fi
6006                        fi
6007                    else
6008                        if [ "${ADJUSTED_ID}" = "debian" ]; then
6009                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
6010                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
6011                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
6012                                exit 1
6013                            fi
6014                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
6015                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
6016
6017                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
6018                        fi
6019                    fi
6020
6021                    # Install base dependencies
6022                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
6023                    case ${ADJUSTED_ID} in
6024                        debian)
6025                            check_packages apt-transport-https $base_packages dirmngr
6026                            ;;
6027                        rhel)
6028                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel
6029
6030                            ;;
6031                    esac
6032
6033                    # Install git if not already present
6034                    if ! command -v git >/dev/null 2>&1; then
6035                        check_packages git
6036                    fi
6037
6038                    # Update CA certificates to ensure HTTPS connections work properly
6039                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
6040                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
6041                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
6042                        update-ca-certificates
6043                    fi
6044
6045                    # Swap to legacy iptables for compatibility (Debian only)
6046                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
6047                        update-alternatives --set iptables /usr/sbin/iptables-legacy
6048                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
6049                    fi
6050
                    # Set up the necessary repositories
                    # Moby (open source) packages come from the Microsoft package feeds;
                    # Docker CE comes from download.docker.com.
                    if [ "${USE_MOBY}" = "true" ]; then
                        # Name of open source engine/cli
                        engine_package_name="moby-engine"
                        cli_package_name="moby-cli"

                        case ${ADJUSTED_ID} in
                            debian)
                                # Import key safely and import Microsoft apt repo
                                {
                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
                                ;;
                            rhel)
                                echo "(*) ${ID} detected - checking for Moby packages..."

                                # Check if moby packages are available in default repos
                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                    echo "(*) Using built-in ${ID} Moby packages"
                                else
                                    case "${ID}" in
                                        azurelinux)
                                            # Azure Linux ships no Moby packages anywhere - hard fail with guidance
                                            echo "(*) Moby packages not found in Azure Linux repositories"
                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                            exit 1
                                            ;;
                                        mariner)
                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
                                            # Add Microsoft repository if packages aren't available locally
                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
                    [microsoft]
                    name=Microsoft Repository
                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
                    EOF
                                    # Verify packages are available after adding repo
                                    pkg_mgr_update
                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                        echo "(*) Moby packages not found in Microsoft repository either"
                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                        exit 1
                                    fi
                                    ;;
                                *)
                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                                    exit 1
                                    ;;
                                esac
                            fi
                            ;;
                        esac
                    else
                        # Name of licensed engine/cli
                        engine_package_name="docker-ce"
                        cli_package_name="docker-ce-cli"
                        case ${ADJUSTED_ID} in
                            debian)
                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
                                ;;
                            rhel)
                                # Docker CE repository setup for RHEL-based systems
                                # Manual repo definition, used when dnf/yum config-manager is unavailable
                                setup_docker_ce_repo() {
                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
                    [docker-ce-stable]
                    name=Docker CE Stable
                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
                    skip_if_unavailable=1
                    module_hotfixes=1
                    EOF
                                }
                                # Best-effort install of Docker CE runtime deps on Azure Linux / Mariner
                                install_azure_linux_deps() {
                                    echo "(*) Installing device-mapper libraries for Docker CE..."
                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                                    echo "(*) Installing additional Docker CE dependencies..."
                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                                        echo "(*) Some optional dependencies could not be installed, continuing..."
                                    }
                                }
                                # Minimal SELinux file-context entry for /var/lib/docker, used in place of container-selinux
                                setup_selinux_context() {
                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                                    fi
                                }

                                # Special handling for RHEL Docker CE installation
                                case "${ID}" in
                                    azurelinux|mariner)
                                        echo "(*) ${ID} detected"
                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                                        echo "(*) Setting up Docker CE repository..."

                                        setup_docker_ce_repo
                                        install_azure_linux_deps

                                        # NOTE(review): USE_MOBY is always != "true" in this else
                                        # branch, so this condition is redundant (but harmless).
                                        if [ "${USE_MOBY}" != "true" ]; then
                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                                            setup_selinux_context
                                        else
                                            echo "(*) Using Moby - container-selinux not required"
                                        fi
                                        ;;
                                    *)
                                        # Standard RHEL/CentOS/Fedora approach
                                        if command -v dnf >/dev/null 2>&1; then
                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        elif command -v yum-config-manager >/dev/null 2>&1; then
                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        else
                                            # Manual fallback
                                            setup_docker_ce_repo
                                fi
                                ;;
                            esac
                            ;;
                        esac
                    fi
6183
                    # Refresh package database
                    case ${ADJUSTED_ID} in
                        debian)
                            apt-get update
                            ;;
                        rhel)
                            # pkg_mgr_update is a helper expected to be defined earlier in this script
                            pkg_mgr_update
                            ;;
                    esac

                    # Soft version matching
                    # "latest"/"lts"/"stable" -> install whatever the repo currently serves;
                    # otherwise resolve DOCKER_VERSION to concrete package version suffixes
                    # (apt uses "=<version>", rpm-based managers use "-<version>").
                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
                        # Empty, meaning grab whatever "latest" is in apt repo
                        engine_version_suffix=""
                        cli_version_suffix=""
                    else
                        case ${ADJUSTED_ID} in
                            debian)
                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                        set +e # Don't exit if finding version fails - will handle gracefully
                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                        set -e
                        # A bare "=" means grep matched nothing - fail with the list of candidates.
                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                            exit 1
                        fi
                        ;;
                    rhel)
                         # For RHEL-based systems, use dnf/yum to find versions
                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
                                set +e # Don't exit if finding version fails - will handle gracefully
                                    if [ "${USE_MOBY}" = "true" ]; then
                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    else
                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    fi
                                set -e
                                if [ -n "${available_versions}" ]; then
                                    engine_version_suffix="-${available_versions}"
                                    cli_version_suffix="-${available_versions}"
                                else
                                    # Unlike the debian path, an unmatched version falls back to latest instead of failing.
                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                                    engine_version_suffix=""
                                    cli_version_suffix=""
                                fi
                                ;;
                        esac
                    fi

                    # Version matching for moby-buildx
                    # Mirrors the docker version matching above; only applies when Moby is used.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
                            # Empty, meaning grab whatever "latest" is in apt repo
                            buildx_version_suffix=""
                        else
                            case ${ADJUSTED_ID} in
                                debian)
                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                            set +e
                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                            set -e
                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                                exit 1
                            fi
                            ;;
                                rhel)
                                    # For RHEL-based systems, try to find buildx version or use latest
                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                                    set +e
                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                                    set -e
                                    if [ -n "${available_buildx}" ]; then
                                        buildx_version_suffix="-${available_buildx}"
                                    else
                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                                        buildx_version_suffix=""
                                    fi
                                    ;;
                            esac
                            echo "buildx_version_suffix ${buildx_version_suffix}"
                        fi
                    fi
6276
6277                    # Install Docker / Moby CLI if not already installed
6278                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
6279                        echo "Docker / Moby CLI and Engine already installed."
6280                    else
6281                            case ${ADJUSTED_ID} in
6282                            debian)
6283                                if [ "${USE_MOBY}" = "true" ]; then
6284                                    # Install engine
6285                                    set +e # Handle error gracefully
6286                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
6287                                        exit_code=$?
6288                                    set -e
6289
6290                                    if [ ${exit_code} -ne 0 ]; then
6291                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
6292                                        exit 1
6293                                    fi
6294
6295                                    # Install compose
6296                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6297                                else
6298                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
6299                                    # Install compose
6300                                    apt-mark hold docker-ce docker-ce-cli
6301                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6302                                fi
6303                                ;;
6304                            rhel)
6305                                if [ "${USE_MOBY}" = "true" ]; then
6306                                    set +e # Handle error gracefully
6307                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
6308                                        exit_code=$?
6309                                    set -e
6310
6311                                    if [ ${exit_code} -ne 0 ]; then
6312                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
6313                                        exit 1
6314                                    fi
6315
6316                                    # Install compose
6317                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6318                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6319                                    fi
6320                                else
6321                                                   # Special handling for Azure Linux Docker CE installation
6322                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
6323                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
6324
6325                                        # Use rpm with --force and --nodeps for Azure Linux
6326                                        set +e  # Don't exit on error for this section
6327                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
6328                                        install_result=$?
6329                                        set -e
6330
6331                                        if [ $install_result -ne 0 ]; then
6332                                            echo "(*) Standard installation failed, trying manual installation..."
6333
6334                                            echo "(*) Standard installation failed, trying manual installation..."
6335
6336                                            # Create directory for downloading packages
6337                                            mkdir -p /tmp/docker-ce-install
6338
6339                                            # Download packages manually using curl since tdnf doesn't support download
6340                                            echo "(*) Downloading Docker CE packages manually..."
6341
6342                                            # Get the repository baseurl
6343                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
6344
6345                                            # Download packages directly
6346                                            cd /tmp/docker-ce-install
6347
6348                                            # Get package names with versions
6349                                            if [ -n "${cli_version_suffix}" ]; then
6350                                                docker_ce_version="${cli_version_suffix#-}"
6351                                                docker_cli_version="${engine_version_suffix#-}"
6352                                            else
6353                                                # Get latest version from repository
6354                                                docker_ce_version="latest"
6355                                            fi
6356
6357                                            echo "(*) Attempting to download Docker CE packages from repository..."
6358
6359                                            # Try to download latest packages if specific version fails
6360                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
6361                                                # Fallback: try to get latest available version
6362                                                echo "(*) Specific version not found, trying latest..."
6363                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6364                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6365                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6366
6367                                                if [ -n "${latest_docker}" ]; then
6368                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
6369                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
6370                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
6371                                                else
6372                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
6373                                                    echo "(*) Please check repository configuration or use 'moby': true"
6374                                                    exit 1
6375                                                fi
6376                                            fi
6377                                            # Install systemd libraries required by Docker CE
6378                                            echo "(*) Installing systemd libraries required by Docker CE..."
6379                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
6380                                                echo "(*) WARNING: Could not install systemd libraries"
6381                                                echo "(*) Docker may fail to start without these"
6382                                            }
6383
6384                                            # Install with rpm --force --nodeps
6385                                            echo "(*) Installing Docker CE packages with dependency override..."
6386                                            rpm -Uvh --force --nodeps *.rpm
6387
6388                                            # Cleanup
6389                                            cd /
6390                                            rm -rf /tmp/docker-ce-install
6391
6392                                            echo "(*) Docker CE installation completed with dependency bypass"
6393                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
6394                                        fi
6395                                    else
6396                                        # Standard installation for other RHEL-based systems
6397                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
6398                                    fi
6399                                    # Install compose
6400                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6401                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6402                                    fi
6403                                fi
6404                                ;;
6405                        esac
6406                    fi
6407
6408                    echo "Finished installing docker / moby!"
6409
6410                    docker_home="/usr/libexec/docker"
6411                    cli_plugins_dir="${docker_home}/cli-plugins"
6412
6413                    # fallback for docker-compose
6414                    fallback_compose(){
6415                        local url=$1
6416                        local repo_url=$(get_github_api_repo_url "$url")
6417                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
6418                        get_previous_version "${url}" "${repo_url}" compose_version
6419                        echo -e "\nAttempting to install v${compose_version}"
6420                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
6421                    }
6422
6423                    # If 'docker-compose' command is to be included
6424                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6425                        case "${architecture}" in
6426                        amd64|x86_64) target_compose_arch=x86_64 ;;
6427                        arm64|aarch64) target_compose_arch=aarch64 ;;
6428                        *)
6429                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6430                            exit 1
6431                        esac
6432
6433                        docker_compose_path="/usr/local/bin/docker-compose"
6434                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
6435                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
6436                            INSTALL_DOCKER_COMPOSE_SWITCH="false"
6437
6438                            if [ "${target_compose_arch}" = "x86_64" ]; then
6439                                echo "(*) Installing docker compose v1..."
6440                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
6441                                chmod +x ${docker_compose_path}
6442
6443                                # Download the SHA256 checksum
6444                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
6445                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
6446                                sha256sum -c docker-compose.sha256sum --ignore-missing
6447                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
6448                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
6449                                exit 1
6450                            else
6451                                # Use pip to get a version that runs on this architecture
6452                                check_packages python3-minimal python3-pip libffi-dev python3-venv
6453                                echo "(*) Installing docker compose v1 via pip..."
6454                                export PYTHONUSERBASE=/usr/local
6455                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
6456                            fi
6457                        else
6458                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
6459                            docker_compose_url="https://github.com/docker/compose"
6460                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
6461                            echo "(*) Installing docker-compose ${compose_version}..."
6462                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
6463                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
6464                                     fallback_compose "$docker_compose_url"
6465                            }
6466
6467                            chmod +x ${docker_compose_path}
6468
6469                            # Download the SHA256 checksum
6470                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
6471                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
6472                            sha256sum -c docker-compose.sha256sum --ignore-missing
6473
6474                            mkdir -p ${cli_plugins_dir}
6475                            cp ${docker_compose_path} ${cli_plugins_dir}
6476                        fi
6477                    fi
6478
6479                    # fallback method for compose-switch
6480                    fallback_compose-switch() {
6481                        local url=$1
6482                        local repo_url=$(get_github_api_repo_url "$url")
6483                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
6484                        get_previous_version "$url" "$repo_url" compose_switch_version
6485                        echo -e "\nAttempting to install v${compose_switch_version}"
6486                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
6487                    }
6488                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
6489                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
6490                        if type docker-compose > /dev/null 2>&1; then
6491                            echo "(*) Installing compose-switch..."
6492                            current_compose_path="$(command -v docker-compose)"
6493                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
6494                            compose_switch_version="latest"
6495                            compose_switch_url="https://github.com/docker/compose-switch"
6496                            # Try to get latest version, fallback to known stable version if GitHub API fails
6497                            set +e
6498                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
6499                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
6500                                echo "(*) GitHub API rate limited or failed, using fallback method"
6501                                fallback_compose-switch "$compose_switch_url"
6502                            fi
6503                            set -e
6504
6505                            # Map architecture for compose-switch downloads
6506                            case "${architecture}" in
6507                                amd64|x86_64) target_switch_arch=amd64 ;;
6508                                arm64|aarch64) target_switch_arch=arm64 ;;
6509                                *) target_switch_arch=${architecture} ;;
6510                            esac
6511                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
6512                            chmod +x /usr/local/bin/compose-switch
6513                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
6514                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
6515                            mv "${current_compose_path}" "${target_compose_path}"
6516                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
6517                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
6518                        else
6519                            err "Skipping installation of compose-switch as docker compose is unavailable..."
6520                        fi
6521                    fi
6522
6523                    # If init file already exists, exit
6524                    if [ -f "/usr/local/share/docker-init.sh" ]; then
6525                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
6526                        # Clean up
6527                        rm -rf /var/lib/apt/lists/*
6528                        exit 0
6529                    fi
6530                    echo "docker-init doesn't exist, adding..."
6531
6532                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
6533                            groupadd -r docker
6534                    fi
6535
6536                    usermod -aG docker ${USERNAME}
6537
6538                    # fallback for docker/buildx
6539                    fallback_buildx() {
6540                        local url=$1
6541                        local repo_url=$(get_github_api_repo_url "$url")
6542                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
6543                        get_previous_version "$url" "$repo_url" buildx_version
6544                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
6545                        echo -e "\nAttempting to install v${buildx_version}"
6546                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
6547                    }
6548
6549                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
6550                        buildx_version="latest"
6551                        docker_buildx_url="https://github.com/docker/buildx"
6552                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
6553                        echo "(*) Installing buildx ${buildx_version}..."
6554
6555                          # Map architecture for buildx downloads
6556                        case "${architecture}" in
6557                            amd64|x86_64) target_buildx_arch=amd64 ;;
6558                            arm64|aarch64) target_buildx_arch=arm64 ;;
6559                            *) target_buildx_arch=${architecture} ;;
6560                        esac
6561
6562                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
6563
6564                        cd /tmp
6565                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
6566
6567                        docker_home="/usr/libexec/docker"
6568                        cli_plugins_dir="${docker_home}/cli-plugins"
6569
6570                        mkdir -p ${cli_plugins_dir}
6571                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
6572                        chmod +x ${cli_plugins_dir}/docker-buildx
6573
6574                        chown -R "${USERNAME}:docker" "${docker_home}"
6575                        chmod -R g+r+w "${docker_home}"
6576                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
6577                    fi
6578
6579                    DOCKER_DEFAULT_IP6_TABLES=""
6580                    if [ "$DISABLE_IP6_TABLES" == true ]; then
6581                        requested_version=""
6582                        # checking whether the version requested either is in semver format or just a number denoting the major version
6583                        # and, extracting the major version number out of the two scenarios
6584                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
6585                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
6586                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
6587                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
6588                            requested_version=$DOCKER_VERSION
6589                        fi
6590                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
6591                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
6592                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
6593                        fi
6594                    fi
6595
6596                    if [ ! -d /usr/local/share ]; then
6597                        mkdir -p /usr/local/share
6598                    fi
6599
6600                    tee /usr/local/share/docker-init.sh > /dev/null \
6601                    << EOF
6602                    #!/bin/sh
6603                    #-------------------------------------------------------------------------------------------------------------
6604                    # Copyright (c) Microsoft Corporation. All rights reserved.
6605                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6606                    #-------------------------------------------------------------------------------------------------------------
6607
6608                    set -e
6609
6610                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
6611                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
6612                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
6613                    EOF
6614
6615                    tee -a /usr/local/share/docker-init.sh > /dev/null \
6616                    << 'EOF'
6617                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6618                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6619                        find /run /var/run -iname 'docker*.pid' -delete || :
6620                        find /run /var/run -iname 'container*.pid' -delete || :
6621
6622                        # -- Start: dind wrapper script --
6623                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6624
6625                        export container=docker
6626
6627                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6628                            mount -t securityfs none /sys/kernel/security || {
6629                                echo >&2 'Could not mount /sys/kernel/security.'
6630                                echo >&2 'AppArmor detection and --privileged mode might break.'
6631                            }
6632                        fi
6633
6634                        # Mount /tmp (conditionally)
6635                        if ! mountpoint -q /tmp; then
6636                            mount -t tmpfs none /tmp
6637                        fi
6638
6639                        set_cgroup_nesting()
6640                        {
6641                            # cgroup v2: enable nesting
6642                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6643                                # move the processes from the root group to the /init group,
6644                                # otherwise writing subtree_control fails with EBUSY.
6645                                # An error during moving non-existent process (i.e., "cat") is ignored.
6646                                mkdir -p /sys/fs/cgroup/init
6647                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6648                                # enable controllers
6649                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6650                                    > /sys/fs/cgroup/cgroup.subtree_control
6651                            fi
6652                        }
6653
6654                        # Set cgroup nesting, retrying if necessary
6655                        retry_cgroup_nesting=0
6656
6657                        until [ "${retry_cgroup_nesting}" -eq "5" ];
6658                        do
6659                            set +e
6660                                set_cgroup_nesting
6661
6662                                if [ $? -ne 0 ]; then
6663                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6664                                else
6665                                    break
6666                                fi
6667
6668                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6669                            set -e
6670                        done
6671
6672                        # -- End: dind wrapper script --
6673
6674                        # Handle DNS
6675                        set +e
6676                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6677                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6678                            then
6679                                echo "Setting dockerd Azure DNS."
6680                                CUSTOMDNS="--dns 168.63.129.16"
6681                            else
6682                                echo "Not setting dockerd DNS manually."
6683                                CUSTOMDNS=""
6684                            fi
6685                        set -e
6686
6687                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6688                        then
6689                            DEFAULT_ADDRESS_POOL=""
6690                        else
6691                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6692                        fi
6693
6694                        # Start docker/moby engine
6695                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6696                    INNEREOF
6697                    )"
6698
6699                    sudo_if() {
6700                        COMMAND="$*"
6701
6702                        if [ "$(id -u)" -ne 0 ]; then
6703                            sudo $COMMAND
6704                        else
6705                            $COMMAND
6706                        fi
6707                    }
6708
6709                    retry_docker_start_count=0
6710                    docker_ok="false"
6711
6712                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
6713                    do
6714                        # Start using sudo if not invoked as root
6715                        if [ "$(id -u)" -ne 0 ]; then
6716                            sudo /bin/sh -c "${dockerd_start}"
6717                        else
6718                            eval "${dockerd_start}"
6719                        fi
6720
6721                        retry_count=0
6722                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
6723                        do
6724                            sleep 1s
6725                            set +e
6726                                docker info > /dev/null 2>&1 && docker_ok="true"
6727                            set -e
6728
6729                            retry_count=`expr $retry_count + 1`
6730                        done
6731
6732                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6733                            echo "(*) Failed to start docker, retrying..."
6734                            set +e
6735                                sudo_if pkill dockerd
6736                                sudo_if pkill containerd
6737                            set -e
6738                        fi
6739
6740                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
6741                    done
6742
6743                    # Execute whatever commands were passed in (if any). This allows us
6744                    # to set this script to ENTRYPOINT while still executing the default CMD.
6745                    exec "$@"
6746                    EOF
6747
6748                    chmod +x /usr/local/share/docker-init.sh
6749                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
6750
6751                    # Clean up
6752                    rm -rf /var/lib/apt/lists/*
6753
6754                    echo 'docker-in-docker-debian script has completed!'"#),
6755                ]).await;
6756
6757                return Ok(http::Response::builder()
6758                    .status(200)
6759                    .body(AsyncBody::from(response))
6760                    .unwrap());
6761            }
            // Mock registry route: serve the OCI image manifest for the `go`
            // dev-container feature at tag `1`.
            // - The `dev.containers.metadata` annotation embeds the feature's
            //   devcontainer-feature.json as an escaped JSON string.
            // - The single layer's digest matches the blob path handled by the
            //   `/v2/devcontainers/features/go/blobs/sha256:eadd8a…` branch, so a
            //   client following this manifest can fetch the feature tarball.
            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
                                "size": 20992,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }
                    "#;

                // Hand the raw manifest JSON back as a 200 response body.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(http_client::AsyncBody::from(response))
                    .unwrap());
            }
            // Mock registry blob download for the `go` dev-container feature layer.
            // The digest in the path presumably matches the layer digest advertised
            // by the `go` manifests handler above — TODO confirm; that handler's
            // layer section is outside this hunk.
            if parts.uri.path()
                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
            {
                // Build a two-entry tarball on the fly: the feature's metadata JSON
                // and its install script (mirrors the upstream
                // ghcr.io/devcontainers/features/go content). NOTE(review): these
                // raw-string payloads are served verbatim as the blob bytes — do not
                // reformat or re-indent them; consumers parse/extract exactly this.
                let response = build_tarball(vec![
                    ("./devcontainer-feature.json", r#"
                        {
                            "id": "go",
                            "version": "1.3.3",
                            "name": "Go",
                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
                            "options": {
                                "version": {
                                    "type": "string",
                                    "proposals": [
                                        "latest",
                                        "none",
                                        "1.24",
                                        "1.23"
                                    ],
                                    "default": "latest",
                                    "description": "Select or enter a Go version to install"
                                },
                                "golangciLintVersion": {
                                    "type": "string",
                                    "default": "latest",
                                    "description": "Version of golangci-lint to install"
                                }
                            },
                            "init": true,
                            "customizations": {
                                "vscode": {
                                    "extensions": [
                                        "golang.Go"
                                    ],
                                    "settings": {
                                        "github.copilot.chat.codeGeneration.instructions": [
                                            {
                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                                            }
                                        ]
                                    }
                                }
                            },
                            "containerEnv": {
                                "GOROOT": "/usr/local/go",
                                "GOPATH": "/go",
                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
                            },
                            "capAdd": [
                                "SYS_PTRACE"
                            ],
                            "securityOpt": [
                                "seccomp=unconfined"
                            ],
                            "installsAfter": [
                                "ghcr.io/devcontainers/features/common-utils"
                            ]
                        }
                        "#),
                    // Second tar entry: the feature's install.sh (bash), stored as-is.
                    ("./install.sh", r#"
                    #!/usr/bin/env bash
                    #-------------------------------------------------------------------------------------------------------------
                    # Copyright (c) Microsoft Corporation. All rights reserved.
                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
                    #-------------------------------------------------------------------------------------------------------------
                    #
                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
                    # Maintainer: The VS Code and Codespaces Teams

                    TARGET_GO_VERSION="${VERSION:-"latest"}"
                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

                    # https://www.google.com/linuxrepositories/
                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

                    set -e

                    if [ "$(id -u)" -ne 0 ]; then
                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
                        exit 1
                    fi

                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
                    . /etc/os-release
                    # Get an adjusted ID independent of distro variants
                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
                        ADJUSTED_ID="debian"
                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
                        ADJUSTED_ID="rhel"
                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
                        else
                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
                        fi
                    else
                        echo "Linux distro ${ID} not supported."
                        exit 1
                    fi

                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
                        # Update the repo files to reference vault.centos.org.
                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
                    fi

                    # Setup INSTALL_CMD & PKG_MGR_CMD
                    if type apt-get > /dev/null 2>&1; then
                        PKG_MGR_CMD=apt-get
                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
                    elif type microdnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=microdnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    elif type dnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=dnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    else
                        PKG_MGR_CMD=yum
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
                    fi

                    # Clean up
                    clean_up() {
                        case ${ADJUSTED_ID} in
                            debian)
                                rm -rf /var/lib/apt/lists/*
                                ;;
                            rhel)
                                rm -rf /var/cache/dnf/* /var/cache/yum/*
                                rm -rf /tmp/yum.log
                                rm -rf ${GPG_INSTALL_PATH}
                                ;;
                        esac
                    }
                    clean_up


                    # Figure out correct version of a three part version number is not passed
                    find_version_from_git_tags() {
                        local variable_name=$1
                        local requested_version=${!variable_name}
                        if [ "${requested_version}" = "none" ]; then return; fi
                        local repository=$2
                        local prefix=${3:-"tags/v"}
                        local separator=${4:-"."}
                        local last_part_optional=${5:-"false"}
                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                            local escaped_separator=${separator//./\\.}
                            local last_part
                            if [ "${last_part_optional}" = "true" ]; then
                                last_part="(${escaped_separator}[0-9]+)?"
                            else
                                last_part="${escaped_separator}[0-9]+"
                            fi
                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                            else
                                set +e
                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                                set -e
                            fi
                        fi
                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                            exit 1
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }

                    pkg_mgr_update() {
                        case $ADJUSTED_ID in
                            debian)
                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                                    echo "Running apt-get update..."
                                    ${PKG_MGR_CMD} update -y
                                fi
                                ;;
                            rhel)
                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} makecache ..."
                                        ${PKG_MGR_CMD} makecache
                                    fi
                                else
                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} check-update ..."
                                        set +e
                                        ${PKG_MGR_CMD} check-update
                                        rc=$?
                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
                                            exit 1
                                        fi
                                        set -e
                                    fi
                                fi
                                ;;
                        esac
                    }

                    # Checks if packages are installed and installs them if not
                    check_packages() {
                        case ${ADJUSTED_ID} in
                            debian)
                                if ! dpkg -s "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                            rhel)
                                if ! rpm -q "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                        esac
                    }

                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
                    rm -f /etc/profile.d/00-restore-env.sh
                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
                    chmod +x /etc/profile.d/00-restore-env.sh

                    # Some distributions do not install awk by default (e.g. Mariner)
                    if ! type awk >/dev/null 2>&1; then
                        check_packages awk
                    fi

                    # Determine the appropriate non-root user
                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
                        USERNAME=""
                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
                                USERNAME=${CURRENT_USER}
                                break
                            fi
                        done
                        if [ "${USERNAME}" = "" ]; then
                            USERNAME=root
                        fi
                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
                        USERNAME=root
                    fi

                    export DEBIAN_FRONTEND=noninteractive

                    check_packages ca-certificates gnupg2 tar gcc make pkg-config

                    if [ $ADJUSTED_ID = "debian" ]; then
                        check_packages g++ libc6-dev
                    else
                        check_packages gcc-c++ glibc-devel
                    fi
                    # Install curl, git, other dependencies if missing
                    if ! type curl > /dev/null 2>&1; then
                        check_packages curl
                    fi
                    if ! type git > /dev/null 2>&1; then
                        check_packages git
                    fi
                    # Some systems, e.g. Mariner, still a few more packages
                    if ! type as > /dev/null 2>&1; then
                        check_packages binutils
                    fi
                    if ! [ -f /usr/include/linux/errno.h ]; then
                        check_packages kernel-headers
                    fi
                    # Minimal RHEL install may need findutils installed
                    if ! [ -f /usr/bin/find ]; then
                        check_packages findutils
                    fi

                    # Get closest match for version number specified
                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

                    architecture="$(uname -m)"
                    case $architecture in
                        x86_64) architecture="amd64";;
                        aarch64 | armv8*) architecture="arm64";;
                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
                        i?86) architecture="386";;
                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
                    esac

                    # Install Go
                    umask 0002
                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
                        groupadd -r golang
                    fi
                    usermod -a -G golang "${USERNAME}"
                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
                        # Use a temporary location for gpg keys to avoid polluting image
                        export GNUPGHOME="/tmp/tmp-gnupg"
                        mkdir -p ${GNUPGHOME}
                        chmod 700 ${GNUPGHOME}
                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
                        gpg -q --import /tmp/tmp-gnupg/golang_key
                        echo "Downloading Go ${TARGET_GO_VERSION}..."
                        set +e
                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        exit_code=$?
                        set -e
                        if [ "$exit_code" != "0" ]; then
                            echo "(!) Download failed."
                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                            set +e
                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
                            # Handle Go's odd version pattern where "0" releases omit the last part
                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                TARGET_GO_VERSION="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
                            else
                                ((breakfix=breakfix-1))
                                if [ "${breakfix}" = "0" ]; then
                                    TARGET_GO_VERSION="${major}.${minor}"
                                else
                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
                                fi
                            fi
                            set -e
                            echo "Trying ${TARGET_GO_VERSION}..."
                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        fi
                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
                        echo "Extracting Go ${TARGET_GO_VERSION}..."
                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
                    else
                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
                    fi

                    # Install Go tools that are isImportant && !replacedByGopls based on
                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
                    GO_TOOLS="\
                        golang.org/x/tools/gopls@latest \
                        honnef.co/go/tools/cmd/staticcheck@latest \
                        golang.org/x/lint/golint@latest \
                        github.com/mgechev/revive@latest \
                        github.com/go-delve/delve/cmd/dlv@latest \
                        github.com/fatih/gomodifytags@latest \
                        github.com/haya14busa/goplay/cmd/goplay@latest \
                        github.com/cweill/gotests/gotests@latest \
                        github.com/josharian/impl@latest"

                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
                        echo "Installing common Go tools..."
                        export PATH=${TARGET_GOROOT}/bin:${PATH}
                        export GOPATH=/tmp/gotools
                        export GOCACHE="${GOPATH}/cache"

                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
                        cd "${GOPATH}"

                        # Use go get for versions of go under 1.16
                        go_install_command=install
                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
                            export GO111MODULE=on
                            go_install_command=get
                            echo "Go version < 1.16, using go get."
                        fi

                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

                        # Move Go tools into path
                        if [ -d "${GOPATH}/bin" ]; then
                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
                        fi

                        # Install golangci-lint from precompiled binaries
                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
                            echo "Installing golangci-lint latest..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin"
                        else
                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
                        fi

                        # Remove Go tools temp directory
                        rm -rf "${GOPATH}"
                    fi


                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

                    # Clean up
                    clean_up

                    echo "Done!"
                        "#),
                ])
                .await;
                // Reply as a registry blob pull would: 200 OK with the raw tarball
                // bytes in the body.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // Mock OCI manifest fetch for the `aws-cli` feature at tag `1`. The
            // single layer's digest (sha256:4e9b04...) matches the blob endpoint
            // handled just below, so a follow-up blob download resolves against
            // this same mock server. The `dev.containers.metadata` annotation
            // carries the feature's devcontainer-feature.json as an escaped
            // JSON string, as published by the devcontainers feature tooling.
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                                "size": 19968,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }"#;
                // 200 OK with the manifest JSON as the body, mimicking the
                // registry's manifest endpoint.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
7242            if parts.uri.path()
7243                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
7244            {
7245                let response = build_tarball(vec![
7246                    (
7247                        "./devcontainer-feature.json",
7248                        r#"
7249{
7250    "id": "aws-cli",
7251    "version": "1.1.3",
7252    "name": "AWS CLI",
7253    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
7254    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
7255    "options": {
7256        "version": {
7257            "type": "string",
7258            "proposals": [
7259                "latest"
7260            ],
7261            "default": "latest",
7262            "description": "Select or enter an AWS CLI version."
7263        },
7264        "verbose": {
7265            "type": "boolean",
7266            "default": true,
7267            "description": "Suppress verbose output."
7268        }
7269    },
7270    "customizations": {
7271        "vscode": {
7272            "extensions": [
7273                "AmazonWebServices.aws-toolkit-vscode"
7274            ],
7275            "settings": {
7276                "github.copilot.chat.codeGeneration.instructions": [
7277                    {
7278                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
7279                    }
7280                ]
7281            }
7282        }
7283    },
7284    "installsAfter": [
7285        "ghcr.io/devcontainers/features/common-utils"
7286    ]
7287}
7288                    "#,
7289                    ),
7290                    (
7291                        "./install.sh",
7292                        r#"#!/usr/bin/env bash
7293                    #-------------------------------------------------------------------------------------------------------------
7294                    # Copyright (c) Microsoft Corporation. All rights reserved.
7295                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7296                    #-------------------------------------------------------------------------------------------------------------
7297                    #
7298                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
7299                    # Maintainer: The VS Code and Codespaces Teams
7300
7301                    set -e
7302
7303                    # Clean up
7304                    rm -rf /var/lib/apt/lists/*
7305
7306                    VERSION=${VERSION:-"latest"}
7307                    VERBOSE=${VERBOSE:-"true"}
7308
7309                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7310                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7311
7312                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7313                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7314                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7315                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7316                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7317                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7318                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7319                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7320                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7321                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7322                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7323                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7324                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7325                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7326                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7327                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7328                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7329                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7330                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7331                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7332                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7333                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7334                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7335                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7336                    YLZATHZKTJyiqA==
7337                    =vYOk
7338                    -----END PGP PUBLIC KEY BLOCK-----"
7339
7340                    if [ "$(id -u)" -ne 0 ]; then
7341                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7342                        exit 1
7343                    fi
7344
7345                    apt_get_update()
7346                    {
7347                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7348                            echo "Running apt-get update..."
7349                            apt-get update -y
7350                        fi
7351                    }
7352
7353                    # Checks if packages are installed and installs them if not
7354                    check_packages() {
7355                        if ! dpkg -s "$@" > /dev/null 2>&1; then
7356                            apt_get_update
7357                            apt-get -y install --no-install-recommends "$@"
7358                        fi
7359                    }
7360
7361                    export DEBIAN_FRONTEND=noninteractive
7362
7363                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7364
7365                    verify_aws_cli_gpg_signature() {
7366                        local filePath=$1
7367                        local sigFilePath=$2
7368                        local awsGpgKeyring=aws-cli-public-key.gpg
7369
7370                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7371                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7372                        local status=$?
7373
7374                        rm "./${awsGpgKeyring}"
7375
7376                        return ${status}
7377                    }
7378
7379                    install() {
7380                        local scriptZipFile=awscli.zip
7381                        local scriptSigFile=awscli.sig
7382
7383                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7384                        if [ "${VERSION}" != "latest" ]; then
7385                            local versionStr=-${VERSION}
7386                        fi
7387                        architecture=$(dpkg --print-architecture)
7388                        case "${architecture}" in
7389                            amd64) architectureStr=x86_64 ;;
7390                            arm64) architectureStr=aarch64 ;;
7391                            *)
7392                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7393                                exit 1
7394                        esac
7395                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7396                        curl "${scriptUrl}" -o "${scriptZipFile}"
7397                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7398
7399                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7400                        if (( $? > 0 )); then
7401                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7402                            exit 1
7403                        fi
7404
7405                        if [ "${VERBOSE}" = "false" ]; then
7406                            unzip -q "${scriptZipFile}"
7407                        else
7408                            unzip "${scriptZipFile}"
7409                        fi
7410
7411                        ./aws/install
7412
7413                        # kubectl bash completion
7414                        mkdir -p /etc/bash_completion.d
7415                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7416
7417                        # kubectl zsh completion
7418                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7419                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7420                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7421                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7422                        fi
7423
7424                        rm -rf ./aws
7425                    }
7426
7427                    echo "(*) Installing AWS CLI..."
7428
7429                    install
7430
7431                    # Clean up
7432                    rm -rf /var/lib/apt/lists/*
7433
7434                    echo "Done!""#,
7435                    ),
7436                    ("./scripts/", r#""#),
7437                    (
7438                        "./scripts/fetch-latest-completer-scripts.sh",
7439                        r#"
7440                        #!/bin/bash
7441                        #-------------------------------------------------------------------------------------------------------------
7442                        # Copyright (c) Microsoft Corporation. All rights reserved.
7443                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7444                        #-------------------------------------------------------------------------------------------------------------
7445                        #
7446                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7447                        # Maintainer: The Dev Container spec maintainers
7448                        #
7449                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7450                        #
7451                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7452                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7453                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7454
7455                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7456                        chmod +x "$BASH_COMPLETER_SCRIPT"
7457
7458                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7459                        chmod +x "$ZSH_COMPLETER_SCRIPT"
7460                        "#,
7461                    ),
7462                    ("./scripts/vendor/", r#""#),
7463                    (
7464                        "./scripts/vendor/aws_bash_completer",
7465                        r#"
7466                        # Typically that would be added under one of the following paths:
7467                        # - /etc/bash_completion.d
7468                        # - /usr/local/etc/bash_completion.d
7469                        # - /usr/share/bash-completion/completions
7470
7471                        complete -C aws_completer aws
7472                        "#,
7473                    ),
7474                    (
7475                        "./scripts/vendor/aws_zsh_completer.sh",
7476                        r#"
7477                        # Source this file to activate auto completion for zsh using the bash
7478                        # compatibility helper.  Make sure to run `compinit` before, which should be
7479                        # given usually.
7480                        #
7481                        # % source /path/to/zsh_complete.sh
7482                        #
7483                        # Typically that would be called somewhere in your .zshrc.
7484                        #
7485                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7486                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7487                        #
7488                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7489                        #
7490                        # zsh releases prior to that version do not export the required env variables!
7491
7492                        autoload -Uz bashcompinit
7493                        bashcompinit -i
7494
7495                        _bash_complete() {
7496                          local ret=1
7497                          local -a suf matches
7498                          local -x COMP_POINT COMP_CWORD
7499                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7500                          local -x COMP_LINE="$words"
7501                          local -A savejobstates savejobtexts
7502
7503                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7504                          (( COMP_CWORD = CURRENT - 1))
7505                          COMP_WORDS=( $words )
7506                          BASH_VERSINFO=( 2 05b 0 1 release )
7507
7508                          savejobstates=( ${(kv)jobstates} )
7509                          savejobtexts=( ${(kv)jobtexts} )
7510
7511                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7512
7513                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7514
7515                          if [[ -n $matches ]]; then
7516                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7517                              compset -P '*/' && matches=( ${matches##*/} )
7518                              compset -S '/*' && matches=( ${matches%%/*} )
7519                              compadd -Q -f "${suf[@]}" -a matches && ret=0
7520                            else
7521                              compadd -Q "${suf[@]}" -a matches && ret=0
7522                            fi
7523                          fi
7524
7525                          if (( ret )); then
7526                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7527                              _default "${suf[@]}" && ret=0
7528                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7529                              _directories "${suf[@]}" && ret=0
7530                            fi
7531                          fi
7532
7533                          return ret
7534                        }
7535
7536                        complete -C aws_completer aws
7537                        "#,
7538                    ),
7539                ]).await;
7540
7541                return Ok(http::Response::builder()
7542                    .status(200)
7543                    .body(AsyncBody::from(response))
7544                    .unwrap());
7545            }
7546
7547            Ok(http::Response::builder()
7548                .status(404)
7549                .body(http_client::AsyncBody::default())
7550                .unwrap())
7551        })
7552    }
7553}