// devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use fs::Fs;
  10use http_client::HttpClient;
  11use util::{ResultExt, command::Command};
  12
  13use crate::{
  14    DevContainerConfig, DevContainerContext,
  15    command_json::{CommandRunner, DefaultCommandRunner},
  16    devcontainer_api::{DevContainerError, DevContainerUp},
  17    devcontainer_json::{
  18        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
  19        deserialize_devcontainer_json,
  20    },
  21    docker::{
  22        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  23        DockerComposeVolume, DockerInspect, DockerPs, get_remote_dir_from_config,
  24    },
  25    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  26    get_oci_token,
  27    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  28    safe_id_lower,
  29};
  30
/// Tracks how far the devcontainer.json has been processed.
///
/// Several operations in this file refuse to run until the config has been
/// advanced from `Deserialized` to `VariableParsed` (see
/// `parse_nonremote_vars`).
enum ConfigStatus {
    /// Parsed straight from disk; `${...}` placeholders are still literal text.
    Deserialized(DevContainer),
    /// Re-parsed after non-remote `${...}` variable expansion was applied.
    VariableParsed(DevContainer),
}
  35
/// A resolved docker-compose setup: the compose file paths handed to the
/// docker client plus the merged configuration it reported back
/// (produced by `docker_compose_manifest`).
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Absolute compose file paths (config directory joined with the relative
    // paths listed in devcontainer.json).
    files: Vec<PathBuf>,
    // Parsed compose configuration returned by the docker client for `files`.
    config: DockerComposeConfig,
}
  41
/// Orchestrates turning a devcontainer.json into a runnable container:
/// parsing, variable expansion, feature download, extended-Dockerfile
/// generation, and building/running via docker or docker-compose.
struct DevContainerManifest {
    // Used to fetch OCI tokens, manifests, and feature tarballs.
    http_client: Arc<dyn HttpClient>,
    // Filesystem abstraction for reading configs and writing build resources.
    fs: Arc<dyn Fs>,
    // Docker operations (inspect, compose config, buildkit support, ...).
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // devcontainer.json text exactly as read from disk (pre-expansion).
    raw_config: String,
    // Current parse state; see `ConfigStatus`.
    config: ConfigStatus,
    // Host environment, consumed by `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory that contains the devcontainer.json file.
    config_directory: PathBuf,
    // File name component of the devcontainer.json file.
    file_name: String,
    // Inspect result for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths and tag for the extended features build; populated alongside
    // `root_image`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Feature manifests downloaded for this container, in install order.
    features: Vec<FeatureManifest>,
}
/// Default parent directory for projects inside the container.
// NOTE(review): not referenced anywhere in this chunk — presumably used by the
// remote-workspace-folder logic elsewhere in the file; confirm before removing.
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
  58impl DevContainerManifest {
  59    async fn new(
  60        context: &DevContainerContext,
  61        environment: HashMap<String, String>,
  62        docker_client: Arc<dyn DockerClient>,
  63        command_runner: Arc<dyn CommandRunner>,
  64        local_config: DevContainerConfig,
  65        local_project_path: &Path,
  66    ) -> Result<Self, DevContainerError> {
  67        let config_path = local_project_path.join(local_config.config_path.clone());
  68        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  69        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  70            log::error!("Unable to read devcontainer contents: {e}");
  71            DevContainerError::DevContainerParseFailed
  72        })?;
  73
  74        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  75
  76        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  77            log::error!("Dev container file should be in a directory");
  78            DevContainerError::NotInValidProject
  79        })?;
  80        let file_name = config_path
  81            .file_name()
  82            .and_then(|f| f.to_str())
  83            .ok_or_else(|| {
  84                log::error!("Dev container file has no file name, or is invalid unicode");
  85                DevContainerError::DevContainerParseFailed
  86            })?;
  87
  88        Ok(Self {
  89            fs: context.fs.clone(),
  90            http_client: context.http_client.clone(),
  91            docker_client,
  92            command_runner,
  93            raw_config: devcontainer_contents,
  94            config: ConfigStatus::Deserialized(devcontainer),
  95            local_project_directory: local_project_path.to_path_buf(),
  96            local_environment: environment,
  97            config_directory: devcontainer_directory.to_path_buf(),
  98            file_name: file_name.to_string(),
  99            root_image: None,
 100            features_build_info: None,
 101            features: Vec::new(),
 102        })
 103    }
 104
 105    fn devcontainer_id(&self) -> String {
 106        let mut labels = self.identifying_labels();
 107        labels.sort_by_key(|(key, _)| *key);
 108
 109        let mut hasher = DefaultHasher::new();
 110        for (key, value) in &labels {
 111            key.hash(&mut hasher);
 112            value.hash(&mut hasher);
 113        }
 114
 115        format!("{:016x}", hasher.finish())
 116    }
 117
 118    fn identifying_labels(&self) -> Vec<(&str, String)> {
 119        let labels = vec![
 120            (
 121                "devcontainer.local_folder",
 122                (self.local_project_directory.display()).to_string(),
 123            ),
 124            (
 125                "devcontainer.config_file",
 126                (self.config_file().display()).to_string(),
 127            ),
 128        ];
 129        labels
 130    }
 131
 132    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
 133        let mut replaced_content = content
 134            .replace("${devcontainerId}", &self.devcontainer_id())
 135            .replace(
 136                "${containerWorkspaceFolderBasename}",
 137                &self.remote_workspace_base_name().unwrap_or_default(),
 138            )
 139            .replace(
 140                "${localWorkspaceFolderBasename}",
 141                &self.local_workspace_base_name()?,
 142            )
 143            .replace(
 144                "${containerWorkspaceFolder}",
 145                &self
 146                    .remote_workspace_folder()
 147                    .map(|path| path.display().to_string())
 148                    .unwrap_or_default()
 149                    .replace('\\', "/"),
 150            )
 151            .replace(
 152                "${localWorkspaceFolder}",
 153                &self.local_workspace_folder().replace('\\', "/"),
 154            );
 155        for (k, v) in &self.local_environment {
 156            let find = format!("${{localEnv:{k}}}");
 157            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
 158        }
 159
 160        Ok(replaced_content)
 161    }
 162
 163    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 164        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 165        let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
 166
 167        self.config = ConfigStatus::VariableParsed(parsed_config);
 168
 169        Ok(())
 170    }
 171
    /// Merges the devcontainer's configured `remote_env` on top of the running
    /// container's environment, resolving `${containerEnv:KEY}` placeholders
    /// against the container's actual env values.
    ///
    /// The substitution is performed on the JSON-serialized form of
    /// `remote_env`, which is then deserialized back into a map.
    // NOTE(review): substituting raw text into serialized JSON means a
    // container env value containing `"` or `\` could produce invalid JSON
    // and fail the re-parse — confirm whether that case can occur in practice.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Resolve each ${containerEnv:KEY} placeholder in the JSON text.
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // Configured remote_env entries win over the container's values.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
 204
 205    fn config_file(&self) -> PathBuf {
 206        self.config_directory.join(&self.file_name)
 207    }
 208
 209    fn dev_container(&self) -> &DevContainer {
 210        match &self.config {
 211            ConfigStatus::Deserialized(dev_container) => dev_container,
 212            ConfigStatus::VariableParsed(dev_container) => dev_container,
 213        }
 214    }
 215
 216    async fn dockerfile_location(&self) -> Option<PathBuf> {
 217        let dev_container = self.dev_container();
 218        match dev_container.build_type() {
 219            DevContainerBuildType::Image => None,
 220            DevContainerBuildType::Dockerfile => dev_container
 221                .build
 222                .as_ref()
 223                .map(|build| self.config_directory.join(&build.dockerfile)),
 224            DevContainerBuildType::DockerCompose => {
 225                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 226                    return None;
 227                };
 228                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 229                else {
 230                    return None;
 231                };
 232                main_service
 233                    .build
 234                    .and_then(|b| b.dockerfile)
 235                    .map(|dockerfile| self.config_directory.join(dockerfile))
 236            }
 237            DevContainerBuildType::None => None,
 238        }
 239    }
 240
 241    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 242        let mut hasher = DefaultHasher::new();
 243        let prefix = match &self.dev_container().name {
 244            Some(name) => &safe_id_lower(name),
 245            None => "zed-dc",
 246        };
 247        let prefix = prefix.get(..6).unwrap_or(prefix);
 248
 249        dockerfile_build_path.hash(&mut hasher);
 250
 251        let hash = hasher.finish();
 252        format!("{}-{:x}-features", prefix, hash)
 253    }
 254
 255    /// Gets the base image from the devcontainer with the following precedence:
 256    /// - The devcontainer image if an image is specified
 257    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 258    /// - The image sourced in the docker-compose main service, if one is specified
 259    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 260    /// If no such image is available, return an error
 261    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 262        if let Some(image) = &self.dev_container().image {
 263            return Ok(image.to_string());
 264        }
 265        if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
 266            let dockerfile_contents = self
 267                .fs
 268                .load(&self.config_directory.join(dockerfile))
 269                .await
 270                .map_err(|e| {
 271                    log::error!("Error reading dockerfile: {e}");
 272                    DevContainerError::DevContainerParseFailed
 273                })?;
 274            return image_from_dockerfile(self, dockerfile_contents);
 275        }
 276        if self.dev_container().docker_compose_file.is_some() {
 277            let docker_compose_manifest = self.docker_compose_manifest().await?;
 278            let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 279
 280            if let Some(dockerfile) = main_service
 281                .build
 282                .as_ref()
 283                .and_then(|b| b.dockerfile.as_ref())
 284            {
 285                let dockerfile_contents = self
 286                    .fs
 287                    .load(&self.config_directory.join(dockerfile))
 288                    .await
 289                    .map_err(|e| {
 290                        log::error!("Error reading dockerfile: {e}");
 291                        DevContainerError::DevContainerParseFailed
 292                    })?;
 293                return image_from_dockerfile(self, dockerfile_contents);
 294            }
 295            if let Some(image) = &main_service.image {
 296                return Ok(image.to_string());
 297            }
 298
 299            log::error!("No valid base image found in docker-compose configuration");
 300            return Err(DevContainerError::DevContainerParseFailed);
 301        }
 302        log::error!("No valid base image found in dev container configuration");
 303        Err(DevContainerError::DevContainerParseFailed)
 304    }
 305
    /// Prepares all on-disk build resources for the container: inspects the
    /// base image, downloads each configured OCI feature into a temp
    /// directory, writes per-feature env/install-wrapper files, and generates
    /// the extended Dockerfile. On success, populates `self.root_image`,
    /// `self.features_build_info`, and `self.features`.
    ///
    /// Requires the config to be in the `VariableParsed` state.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Plain image with no features: nothing to download or generate.
        // NOTE(review): this early return leaves `self.root_image` unset even
        // though the image was inspected above — confirm callers don't rely on
        // it being populated in the image-only path.
        if dev_container.build_type() == DevContainerBuildType::Image
            && !dev_container.has_features()
        {
            log::debug!("No resources to download. Proceeding with just the image");
            return Ok(());
        }

        // Timestamped scratch area for this build's feature content.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Builtin env consumed by feature install scripts inside the image.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Respect any explicit install-order override from the config.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` means the feature is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Directory name is "<id>_<index>" to keep install order stable
            // and avoid collisions between same-id features.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // The feature tarball is the manifest's first layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also gets non-remote variable expansion.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            // Wrapper script that sources the feature env before installing.
            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        // Compose builds only use buildkit when the client supports it;
        // plain docker builds always do.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Best-effort load of the user's own Dockerfile, if any.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 548
    /// Builds the text of `Dockerfile.extended`: the user's original
    /// Dockerfile (if any), a stage that normalizes the feature content, the
    /// target stage that copies features in and records user home dirs, and
    /// one layer per feature. When uid remapping is disabled, container env
    /// vars are baked into this Dockerfile instead of a later layer.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // uid remapping defaults to on, except on Windows where it is never done.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile snippet per downloaded feature, concatenated in order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell commands that resolve each user's passwd entry (for $HOME).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the user's Dockerfile has a named final stage we can target.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without buildkit, feature content comes from an extra named stage
        // rather than a build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): appended with no leading newline — this relies on
            // the previous segment ending in '\n' (true for the sed block;
            // confirm `generate_dockerfile_env` output also ends with one).
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 650
 651    fn build_merged_resources(
 652        &self,
 653        base_image: DockerInspect,
 654    ) -> Result<DockerBuildResources, DevContainerError> {
 655        let dev_container = match &self.config {
 656            ConfigStatus::Deserialized(_) => {
 657                log::error!(
 658                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 659                );
 660                return Err(DevContainerError::DevContainerParseFailed);
 661            }
 662            ConfigStatus::VariableParsed(dev_container) => dev_container,
 663        };
 664        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 665
 666        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 667
 668        mounts.append(&mut feature_mounts);
 669
 670        let privileged = dev_container.privileged.unwrap_or(false)
 671            || self.features.iter().any(|f| f.privileged());
 672
 673        let mut entrypoint_script_lines = vec![
 674            "echo Container started".to_string(),
 675            "trap \"exit 0\" 15".to_string(),
 676        ];
 677
 678        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 679            entrypoint_script_lines.push(entrypoint.clone());
 680        }
 681        entrypoint_script_lines.append(&mut vec![
 682            "exec \"$@\"".to_string(),
 683            "while sleep 1 & wait $!; do :; done".to_string(),
 684        ]);
 685
 686        Ok(DockerBuildResources {
 687            image: base_image,
 688            additional_mounts: mounts,
 689            privileged,
 690            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 691        })
 692    }
 693
 694    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 695        if let ConfigStatus::Deserialized(_) = &self.config {
 696            log::error!(
 697                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 698            );
 699            return Err(DevContainerError::DevContainerParseFailed);
 700        }
 701        let dev_container = self.dev_container();
 702        match dev_container.build_type() {
 703            DevContainerBuildType::Image | DevContainerBuildType::Dockerfile => {
 704                let built_docker_image = self.build_docker_image().await?;
 705                let built_docker_image = self
 706                    .update_remote_user_uid(built_docker_image, None)
 707                    .await?;
 708
 709                let resources = self.build_merged_resources(built_docker_image)?;
 710                Ok(DevContainerBuildResources::Docker(resources))
 711            }
 712            DevContainerBuildType::DockerCompose => {
 713                log::debug!("Using docker compose. Building extended compose files");
 714                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 715
 716                return Ok(DevContainerBuildResources::DockerCompose(
 717                    docker_compose_resources,
 718                ));
 719            }
 720            DevContainerBuildType::None => {
 721                return Err(DevContainerError::DevContainerParseFailed);
 722            }
 723        }
 724    }
 725
 726    async fn run_dev_container(
 727        &self,
 728        build_resources: DevContainerBuildResources,
 729    ) -> Result<DevContainerUp, DevContainerError> {
 730        let ConfigStatus::VariableParsed(_) = &self.config else {
 731            log::error!(
 732                "Variables have not been parsed; cannot proceed with running the dev container"
 733            );
 734            return Err(DevContainerError::DevContainerParseFailed);
 735        };
 736        let running_container = match build_resources {
 737            DevContainerBuildResources::DockerCompose(resources) => {
 738                self.run_docker_compose(resources).await?
 739            }
 740            DevContainerBuildResources::Docker(resources) => {
 741                self.run_docker_image(resources).await?
 742            }
 743        };
 744
 745        let remote_user = get_remote_user_from_config(&running_container, self)?;
 746        let remote_workspace_folder = get_remote_dir_from_config(
 747            &running_container,
 748            (&self.local_project_directory.display()).to_string(),
 749        )?;
 750
 751        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 752
 753        Ok(DevContainerUp {
 754            container_id: running_container.id,
 755            remote_user,
 756            remote_workspace_folder,
 757            extension_ids: self.extension_ids(),
 758            remote_env,
 759        })
 760    }
 761
 762    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 763        let dev_container = match &self.config {
 764            ConfigStatus::Deserialized(_) => {
 765                log::error!(
 766                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 767                );
 768                return Err(DevContainerError::DevContainerParseFailed);
 769            }
 770            ConfigStatus::VariableParsed(dev_container) => dev_container,
 771        };
 772        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 773            return Err(DevContainerError::DevContainerParseFailed);
 774        };
 775        let docker_compose_full_paths = docker_compose_files
 776            .iter()
 777            .map(|relative| self.config_directory.join(relative))
 778            .collect::<Vec<PathBuf>>();
 779
 780        let Some(config) = self
 781            .docker_client
 782            .get_docker_compose_config(&docker_compose_full_paths)
 783            .await?
 784        else {
 785            log::error!("Output could not deserialize into DockerComposeConfig");
 786            return Err(DevContainerError::DevContainerParseFailed);
 787        };
 788        Ok(DockerComposeResources {
 789            files: docker_compose_full_paths,
 790            config,
 791        })
 792    }
 793
 794    async fn build_and_extend_compose_files(
 795        &self,
 796    ) -> Result<DockerComposeResources, DevContainerError> {
 797        let dev_container = match &self.config {
 798            ConfigStatus::Deserialized(_) => {
 799                log::error!(
 800                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
 801                );
 802                return Err(DevContainerError::DevContainerParseFailed);
 803            }
 804            ConfigStatus::VariableParsed(dev_container) => dev_container,
 805        };
 806
 807        let Some(features_build_info) = &self.features_build_info else {
 808            log::error!(
 809                "Cannot build and extend compose files: features build info is not yet constructed"
 810            );
 811            return Err(DevContainerError::DevContainerParseFailed);
 812        };
 813        let mut docker_compose_resources = self.docker_compose_manifest().await?;
 814        let supports_buildkit = self.docker_client.supports_compose_buildkit();
 815
 816        let (main_service_name, main_service) =
 817            find_primary_service(&docker_compose_resources, self)?;
 818        let built_service_image = if main_service
 819            .build
 820            .as_ref()
 821            .map(|b| b.dockerfile.as_ref())
 822            .is_some()
 823        {
 824            if !supports_buildkit {
 825                self.build_feature_content_image().await?;
 826            }
 827
 828            let dockerfile_path = &features_build_info.dockerfile_path;
 829
 830            let build_args = if !supports_buildkit {
 831                HashMap::from([
 832                    (
 833                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 834                        "dev_container_auto_added_stage_label".to_string(),
 835                    ),
 836                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 837                ])
 838            } else {
 839                HashMap::from([
 840                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 841                    (
 842                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 843                        "dev_container_auto_added_stage_label".to_string(),
 844                    ),
 845                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 846                ])
 847            };
 848
 849            let additional_contexts = if !supports_buildkit {
 850                None
 851            } else {
 852                Some(HashMap::from([(
 853                    "dev_containers_feature_content_source".to_string(),
 854                    features_build_info
 855                        .features_content_dir
 856                        .display()
 857                        .to_string(),
 858                )]))
 859            };
 860
 861            let build_override = DockerComposeConfig {
 862                name: None,
 863                services: HashMap::from([(
 864                    main_service_name.clone(),
 865                    DockerComposeService {
 866                        image: Some(features_build_info.image_tag.clone()),
 867                        entrypoint: None,
 868                        cap_add: None,
 869                        security_opt: None,
 870                        labels: None,
 871                        build: Some(DockerComposeServiceBuild {
 872                            context: Some(
 873                                features_build_info.empty_context_dir.display().to_string(),
 874                            ),
 875                            dockerfile: Some(dockerfile_path.display().to_string()),
 876                            args: Some(build_args),
 877                            additional_contexts,
 878                        }),
 879                        volumes: Vec::new(),
 880                        ..Default::default()
 881                    },
 882                )]),
 883                volumes: HashMap::new(),
 884            };
 885
 886            let temp_base = std::env::temp_dir().join("devcontainer-zed");
 887            let config_location = temp_base.join("docker_compose_build.json");
 888
 889            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 890                log::error!("Error serializing docker compose runtime override: {e}");
 891                DevContainerError::DevContainerParseFailed
 892            })?;
 893
 894            self.fs
 895                .write(&config_location, config_json.as_bytes())
 896                .await
 897                .map_err(|e| {
 898                    log::error!("Error writing the runtime override file: {e}");
 899                    DevContainerError::FilesystemError
 900                })?;
 901
 902            docker_compose_resources.files.push(config_location);
 903
 904            self.docker_client
 905                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
 906                .await?;
 907            self.docker_client
 908                .inspect(&features_build_info.image_tag)
 909                .await?
 910        } else if let Some(image) = &main_service.image {
 911            if dev_container
 912                .features
 913                .as_ref()
 914                .is_none_or(|features| features.is_empty())
 915            {
 916                self.docker_client.inspect(image).await?
 917            } else {
 918                if !supports_buildkit {
 919                    self.build_feature_content_image().await?;
 920                }
 921
 922                let dockerfile_path = &features_build_info.dockerfile_path;
 923
 924                let build_args = if !supports_buildkit {
 925                    HashMap::from([
 926                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 927                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 928                    ])
 929                } else {
 930                    HashMap::from([
 931                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 932                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 933                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 934                    ])
 935                };
 936
 937                let additional_contexts = if !supports_buildkit {
 938                    None
 939                } else {
 940                    Some(HashMap::from([(
 941                        "dev_containers_feature_content_source".to_string(),
 942                        features_build_info
 943                            .features_content_dir
 944                            .display()
 945                            .to_string(),
 946                    )]))
 947                };
 948
 949                let build_override = DockerComposeConfig {
 950                    name: None,
 951                    services: HashMap::from([(
 952                        main_service_name.clone(),
 953                        DockerComposeService {
 954                            image: Some(features_build_info.image_tag.clone()),
 955                            entrypoint: None,
 956                            cap_add: None,
 957                            security_opt: None,
 958                            labels: None,
 959                            build: Some(DockerComposeServiceBuild {
 960                                context: Some(
 961                                    features_build_info.empty_context_dir.display().to_string(),
 962                                ),
 963                                dockerfile: Some(dockerfile_path.display().to_string()),
 964                                args: Some(build_args),
 965                                additional_contexts,
 966                            }),
 967                            volumes: Vec::new(),
 968                            ..Default::default()
 969                        },
 970                    )]),
 971                    volumes: HashMap::new(),
 972                };
 973
 974                let temp_base = std::env::temp_dir().join("devcontainer-zed");
 975                let config_location = temp_base.join("docker_compose_build.json");
 976
 977                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 978                    log::error!("Error serializing docker compose runtime override: {e}");
 979                    DevContainerError::DevContainerParseFailed
 980                })?;
 981
 982                self.fs
 983                    .write(&config_location, config_json.as_bytes())
 984                    .await
 985                    .map_err(|e| {
 986                        log::error!("Error writing the runtime override file: {e}");
 987                        DevContainerError::FilesystemError
 988                    })?;
 989
 990                docker_compose_resources.files.push(config_location);
 991
 992                self.docker_client
 993                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
 994                    .await?;
 995
 996                self.docker_client
 997                    .inspect(&features_build_info.image_tag)
 998                    .await?
 999            }
1000        } else {
1001            log::error!("Docker compose must have either image or dockerfile defined");
1002            return Err(DevContainerError::DevContainerParseFailed);
1003        };
1004
1005        let built_service_image = self
1006            .update_remote_user_uid(built_service_image, Some(&features_build_info.image_tag))
1007            .await?;
1008
1009        let resources = self.build_merged_resources(built_service_image)?;
1010
1011        let network_mode = main_service.network_mode.as_ref();
1012        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1013        let runtime_override_file = self
1014            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1015            .await?;
1016
1017        docker_compose_resources.files.push(runtime_override_file);
1018
1019        Ok(docker_compose_resources)
1020    }
1021
1022    async fn write_runtime_override_file(
1023        &self,
1024        main_service_name: &str,
1025        network_mode_service: Option<&str>,
1026        resources: DockerBuildResources,
1027    ) -> Result<PathBuf, DevContainerError> {
1028        let config =
1029            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1030        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1031        let config_location = temp_base.join("docker_compose_runtime.json");
1032
1033        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1034            log::error!("Error serializing docker compose runtime override: {e}");
1035            DevContainerError::DevContainerParseFailed
1036        })?;
1037
1038        self.fs
1039            .write(&config_location, config_json.as_bytes())
1040            .await
1041            .map_err(|e| {
1042                log::error!("Error writing the runtime override file: {e}");
1043                DevContainerError::FilesystemError
1044            })?;
1045
1046        Ok(config_location)
1047    }
1048
1049    fn build_runtime_override(
1050        &self,
1051        main_service_name: &str,
1052        network_mode_service: Option<&str>,
1053        resources: DockerBuildResources,
1054    ) -> Result<DockerComposeConfig, DevContainerError> {
1055        let mut runtime_labels = HashMap::new();
1056
1057        if let Some(metadata) = &resources.image.config.labels.metadata {
1058            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1059                log::error!("Error serializing docker image metadata: {e}");
1060                DevContainerError::ContainerNotValid(resources.image.id.clone())
1061            })?;
1062
1063            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1064        }
1065
1066        for (k, v) in self.identifying_labels() {
1067            runtime_labels.insert(k.to_string(), v.to_string());
1068        }
1069
1070        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1071            .additional_mounts
1072            .iter()
1073            .filter_map(|mount| {
1074                if let Some(mount_type) = &mount.mount_type
1075                    && mount_type.to_lowercase() == "volume"
1076                {
1077                    Some((
1078                        mount.source.clone(),
1079                        DockerComposeVolume {
1080                            name: mount.source.clone(),
1081                        },
1082                    ))
1083                } else {
1084                    None
1085                }
1086            })
1087            .collect();
1088
1089        let volumes: Vec<MountDefinition> = resources
1090            .additional_mounts
1091            .iter()
1092            .map(|v| MountDefinition {
1093                source: v.source.clone(),
1094                target: v.target.clone(),
1095                mount_type: v.mount_type.clone(),
1096            })
1097            .collect();
1098
1099        let mut main_service = DockerComposeService {
1100            entrypoint: Some(vec![
1101                "/bin/sh".to_string(),
1102                "-c".to_string(),
1103                resources.entrypoint_script,
1104                "-".to_string(),
1105            ]),
1106            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1107            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1108            labels: Some(runtime_labels),
1109            volumes,
1110            privileged: Some(resources.privileged),
1111            ..Default::default()
1112        };
1113        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1114        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1115        if let Some(forward_ports) = &self.dev_container().forward_ports {
1116            let main_service_ports: Vec<String> = forward_ports
1117                .iter()
1118                .filter_map(|f| match f {
1119                    ForwardPort::Number(port) => Some(port.to_string()),
1120                    ForwardPort::String(port) => {
1121                        let parts: Vec<&str> = port.split(":").collect();
1122                        if parts.len() <= 1 {
1123                            Some(port.to_string())
1124                        } else if parts.len() == 2 {
1125                            if parts[0] == main_service_name {
1126                                Some(parts[1].to_string())
1127                            } else {
1128                                None
1129                            }
1130                        } else {
1131                            None
1132                        }
1133                    }
1134                })
1135                .collect();
1136            for port in main_service_ports {
1137                // If the main service uses a different service's network bridge, append to that service's ports instead
1138                if let Some(network_service_name) = network_mode_service {
1139                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1140                        service.ports.push(format!("{port}:{port}"));
1141                    } else {
1142                        service_declarations.insert(
1143                            network_service_name.to_string(),
1144                            DockerComposeService {
1145                                ports: vec![format!("{port}:{port}")],
1146                                ..Default::default()
1147                            },
1148                        );
1149                    }
1150                } else {
1151                    main_service.ports.push(format!("{port}:{port}"));
1152                }
1153            }
1154            let other_service_ports: Vec<(&str, &str)> = forward_ports
1155                .iter()
1156                .filter_map(|f| match f {
1157                    ForwardPort::Number(_) => None,
1158                    ForwardPort::String(port) => {
1159                        let parts: Vec<&str> = port.split(":").collect();
1160                        if parts.len() != 2 {
1161                            None
1162                        } else {
1163                            if parts[0] == main_service_name {
1164                                None
1165                            } else {
1166                                Some((parts[0], parts[1]))
1167                            }
1168                        }
1169                    }
1170                })
1171                .collect();
1172            for (service_name, port) in other_service_ports {
1173                if let Some(service) = service_declarations.get_mut(service_name) {
1174                    service.ports.push(format!("{port}:{port}"));
1175                } else {
1176                    service_declarations.insert(
1177                        service_name.to_string(),
1178                        DockerComposeService {
1179                            ports: vec![format!("{port}:{port}")],
1180                            ..Default::default()
1181                        },
1182                    );
1183                }
1184            }
1185        }
1186        if let Some(port) = &self.dev_container().app_port {
1187            if let Some(network_service_name) = network_mode_service {
1188                if let Some(service) = service_declarations.get_mut(network_service_name) {
1189                    service.ports.push(format!("{port}:{port}"));
1190                } else {
1191                    service_declarations.insert(
1192                        network_service_name.to_string(),
1193                        DockerComposeService {
1194                            ports: vec![format!("{port}:{port}")],
1195                            ..Default::default()
1196                        },
1197                    );
1198                }
1199            } else {
1200                main_service.ports.push(format!("{port}:{port}"));
1201            }
1202        }
1203
1204        service_declarations.insert(main_service_name.to_string(), main_service);
1205        let new_docker_compose_config = DockerComposeConfig {
1206            name: None,
1207            services: service_declarations,
1208            volumes: config_volumes,
1209        };
1210
1211        Ok(new_docker_compose_config)
1212    }
1213
1214    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1215        let dev_container = match &self.config {
1216            ConfigStatus::Deserialized(_) => {
1217                log::error!(
1218                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1219                );
1220                return Err(DevContainerError::DevContainerParseFailed);
1221            }
1222            ConfigStatus::VariableParsed(dev_container) => dev_container,
1223        };
1224
1225        match dev_container.build_type() {
1226            DevContainerBuildType::Image => {
1227                let Some(image_tag) = &dev_container.image else {
1228                    return Err(DevContainerError::DevContainerParseFailed);
1229                };
1230                let base_image = self.docker_client.inspect(image_tag).await?;
1231                if dev_container
1232                    .features
1233                    .as_ref()
1234                    .is_none_or(|features| features.is_empty())
1235                {
1236                    log::debug!("No features to add. Using base image");
1237                    return Ok(base_image);
1238                }
1239            }
1240            DevContainerBuildType::Dockerfile => {}
1241            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1242                return Err(DevContainerError::DevContainerParseFailed);
1243            }
1244        };
1245
1246        let mut command = self.create_docker_build()?;
1247
1248        let output = self
1249            .command_runner
1250            .run_command(&mut command)
1251            .await
1252            .map_err(|e| {
1253                log::error!("Error building docker image: {e}");
1254                DevContainerError::CommandFailed(command.get_program().display().to_string())
1255            })?;
1256
1257        if !output.status.success() {
1258            let stderr = String::from_utf8_lossy(&output.stderr);
1259            log::error!("docker buildx build failed: {stderr}");
1260            return Err(DevContainerError::CommandFailed(
1261                command.get_program().display().to_string(),
1262            ));
1263        }
1264
1265        // After a successful build, inspect the newly tagged image to get its metadata
1266        let Some(features_build_info) = &self.features_build_info else {
1267            log::error!("Features build info expected, but not created");
1268            return Err(DevContainerError::DevContainerParseFailed);
1269        };
1270        let image = self
1271            .docker_client
1272            .inspect(&features_build_info.image_tag)
1273            .await?;
1274
1275        Ok(image)
1276    }
1277
    /// Windows no-op counterpart of [`Self::update_remote_user_uid`]: there is
    /// no host UID/GID to align the container user with, so the image is
    /// returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _override_tag: Option<&str>,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1286    #[cfg(not(target_os = "windows"))]
1287    async fn update_remote_user_uid(
1288        &self,
1289        image: DockerInspect,
1290        override_tag: Option<&str>,
1291    ) -> Result<DockerInspect, DevContainerError> {
1292        let dev_container = self.dev_container();
1293
1294        let Some(features_build_info) = &self.features_build_info else {
1295            return Ok(image);
1296        };
1297
1298        // updateRemoteUserUID defaults to true per the devcontainers spec
1299        if dev_container.update_remote_user_uid == Some(false) {
1300            return Ok(image);
1301        }
1302
1303        let remote_user = get_remote_user_from_config(&image, self)?;
1304        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1305            return Ok(image);
1306        }
1307
1308        let image_user = image
1309            .config
1310            .image_user
1311            .as_deref()
1312            .unwrap_or("root")
1313            .to_string();
1314
1315        let host_uid = Command::new("id")
1316            .arg("-u")
1317            .output()
1318            .await
1319            .map_err(|e| {
1320                log::error!("Failed to get host UID: {e}");
1321                DevContainerError::CommandFailed("id -u".to_string())
1322            })
1323            .and_then(|output| {
1324                String::from_utf8_lossy(&output.stdout)
1325                    .trim()
1326                    .parse::<u32>()
1327                    .map_err(|e| {
1328                        log::error!("Failed to parse host UID: {e}");
1329                        DevContainerError::CommandFailed("id -u".to_string())
1330                    })
1331            })?;
1332
1333        let host_gid = Command::new("id")
1334            .arg("-g")
1335            .output()
1336            .await
1337            .map_err(|e| {
1338                log::error!("Failed to get host GID: {e}");
1339                DevContainerError::CommandFailed("id -g".to_string())
1340            })
1341            .and_then(|output| {
1342                String::from_utf8_lossy(&output.stdout)
1343                    .trim()
1344                    .parse::<u32>()
1345                    .map_err(|e| {
1346                        log::error!("Failed to parse host GID: {e}");
1347                        DevContainerError::CommandFailed("id -g".to_string())
1348                    })
1349            })?;
1350
1351        let dockerfile_content = self.generate_update_uid_dockerfile();
1352
1353        let dockerfile_path = features_build_info
1354            .features_content_dir
1355            .join("updateUID.Dockerfile");
1356        self.fs
1357            .write(&dockerfile_path, dockerfile_content.as_bytes())
1358            .await
1359            .map_err(|e| {
1360                log::error!("Failed to write updateUID Dockerfile: {e}");
1361                DevContainerError::FilesystemError
1362            })?;
1363
1364        let updated_image_tag = override_tag
1365            .map(|t| t.to_string())
1366            .unwrap_or_else(|| format!("{}-uid", features_build_info.image_tag));
1367
1368        let mut command = Command::new(self.docker_client.docker_cli());
1369        command.args(["build"]);
1370        command.args(["-f", &dockerfile_path.display().to_string()]);
1371        command.args(["-t", &updated_image_tag]);
1372        command.args([
1373            "--build-arg",
1374            &format!("BASE_IMAGE={}", features_build_info.image_tag),
1375        ]);
1376        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1377        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1378        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1379        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1380        command.arg(features_build_info.empty_context_dir.display().to_string());
1381
1382        let output = self
1383            .command_runner
1384            .run_command(&mut command)
1385            .await
1386            .map_err(|e| {
1387                log::error!("Error building UID update image: {e}");
1388                DevContainerError::CommandFailed(command.get_program().display().to_string())
1389            })?;
1390
1391        if !output.status.success() {
1392            let stderr = String::from_utf8_lossy(&output.stderr);
1393            log::error!("UID update build failed: {stderr}");
1394            return Err(DevContainerError::CommandFailed(
1395                command.get_program().display().to_string(),
1396            ));
1397        }
1398
1399        self.docker_client.inspect(&updated_image_tag).await
1400    }
1401
1402    #[cfg(not(target_os = "windows"))]
1403    fn generate_update_uid_dockerfile(&self) -> String {
1404        let mut dockerfile = r#"ARG BASE_IMAGE
1405FROM $BASE_IMAGE
1406
1407USER root
1408
1409ARG REMOTE_USER
1410ARG NEW_UID
1411ARG NEW_GID
1412SHELL ["/bin/sh", "-c"]
1413RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1414	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1415	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1416	if [ -z "$OLD_UID" ]; then \
1417		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1418	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1419		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1420	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1421		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1422	else \
1423		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1424			FREE_GID=65532; \
1425			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1426			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1427			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1428		fi; \
1429		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1430		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1431		if [ "$OLD_GID" != "$NEW_GID" ]; then \
1432			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1433		fi; \
1434		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1435	fi;
1436
1437ARG IMAGE_USER
1438USER $IMAGE_USER
1439
1440# Ensure that /etc/profile does not clobber the existing path
1441RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1442"#.to_string();
1443        for feature in &self.features {
1444            let container_env_layer = feature.generate_dockerfile_env();
1445            dockerfile = format!("{dockerfile}\n{container_env_layer}");
1446        }
1447
1448        if let Some(env) = &self.dev_container().container_env {
1449            for (key, value) in env {
1450                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1451            }
1452        }
1453        dockerfile
1454    }
1455
1456    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1457        let Some(features_build_info) = &self.features_build_info else {
1458            log::error!("Features build info not available for building feature content image");
1459            return Err(DevContainerError::DevContainerParseFailed);
1460        };
1461        let features_content_dir = &features_build_info.features_content_dir;
1462
1463        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1464        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1465
1466        self.fs
1467            .write(&dockerfile_path, dockerfile_content.as_bytes())
1468            .await
1469            .map_err(|e| {
1470                log::error!("Failed to write feature content Dockerfile: {e}");
1471                DevContainerError::FilesystemError
1472            })?;
1473
1474        let mut command = Command::new(self.docker_client.docker_cli());
1475        command.args([
1476            "build",
1477            "-t",
1478            "dev_container_feature_content_temp",
1479            "-f",
1480            &dockerfile_path.display().to_string(),
1481            &features_content_dir.display().to_string(),
1482        ]);
1483
1484        let output = self
1485            .command_runner
1486            .run_command(&mut command)
1487            .await
1488            .map_err(|e| {
1489                log::error!("Error building feature content image: {e}");
1490                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1491            })?;
1492
1493        if !output.status.success() {
1494            let stderr = String::from_utf8_lossy(&output.stderr);
1495            log::error!("Feature content image build failed: {stderr}");
1496            return Err(DevContainerError::CommandFailed(
1497                self.docker_client.docker_cli(),
1498            ));
1499        }
1500
1501        Ok(())
1502    }
1503
1504    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1505        let dev_container = match &self.config {
1506            ConfigStatus::Deserialized(_) => {
1507                log::error!(
1508                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1509                );
1510                return Err(DevContainerError::DevContainerParseFailed);
1511            }
1512            ConfigStatus::VariableParsed(dev_container) => dev_container,
1513        };
1514
1515        let Some(features_build_info) = &self.features_build_info else {
1516            log::error!(
1517                "Cannot create docker build command; features build info has not been constructed"
1518            );
1519            return Err(DevContainerError::DevContainerParseFailed);
1520        };
1521        let mut command = Command::new(self.docker_client.docker_cli());
1522
1523        command.args(["buildx", "build"]);
1524
1525        // --load is short for --output=docker, loading the built image into the local docker images
1526        command.arg("--load");
1527
1528        // BuildKit build context: provides the features content directory as a named context
1529        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1530        command.args([
1531            "--build-context",
1532            &format!(
1533                "dev_containers_feature_content_source={}",
1534                features_build_info.features_content_dir.display()
1535            ),
1536        ]);
1537
1538        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1539        if let Some(build_image) = &features_build_info.build_image {
1540            command.args([
1541                "--build-arg",
1542                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1543            ]);
1544        } else {
1545            command.args([
1546                "--build-arg",
1547                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1548            ]);
1549        }
1550
1551        command.args([
1552            "--build-arg",
1553            &format!(
1554                "_DEV_CONTAINERS_IMAGE_USER={}",
1555                self.root_image
1556                    .as_ref()
1557                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1558                    .unwrap_or(&"root".to_string())
1559            ),
1560        ]);
1561
1562        command.args([
1563            "--build-arg",
1564            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1565        ]);
1566
1567        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1568            for (key, value) in args {
1569                command.args(["--build-arg", &format!("{}={}", key, value)]);
1570            }
1571        }
1572
1573        command.args(["--target", "dev_containers_target_stage"]);
1574
1575        command.args([
1576            "-f",
1577            &features_build_info.dockerfile_path.display().to_string(),
1578        ]);
1579
1580        command.args(["-t", &features_build_info.image_tag]);
1581
1582        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
1583            command.arg(self.config_directory.display().to_string());
1584        } else {
1585            // Use an empty folder as the build context to avoid pulling in unneeded files.
1586            // The actual feature content is supplied via the BuildKit build context above.
1587            command.arg(features_build_info.empty_context_dir.display().to_string());
1588        }
1589
1590        Ok(command)
1591    }
1592
1593    async fn run_docker_compose(
1594        &self,
1595        resources: DockerComposeResources,
1596    ) -> Result<DockerInspect, DevContainerError> {
1597        let mut command = Command::new(self.docker_client.docker_cli());
1598        command.args(&["compose", "--project-name", &self.project_name()]);
1599        for docker_compose_file in resources.files {
1600            command.args(&["-f", &docker_compose_file.display().to_string()]);
1601        }
1602        command.args(&["up", "-d"]);
1603
1604        let output = self
1605            .command_runner
1606            .run_command(&mut command)
1607            .await
1608            .map_err(|e| {
1609                log::error!("Error running docker compose up: {e}");
1610                DevContainerError::CommandFailed(command.get_program().display().to_string())
1611            })?;
1612
1613        if !output.status.success() {
1614            let stderr = String::from_utf8_lossy(&output.stderr);
1615            log::error!("Non-success status from docker compose up: {}", stderr);
1616            return Err(DevContainerError::CommandFailed(
1617                command.get_program().display().to_string(),
1618            ));
1619        }
1620
1621        if let Some(docker_ps) = self.check_for_existing_container().await? {
1622            log::debug!("Found newly created dev container");
1623            return self.docker_client.inspect(&docker_ps.id).await;
1624        }
1625
1626        log::error!("Could not find existing container after docker compose up");
1627
1628        Err(DevContainerError::DevContainerParseFailed)
1629    }
1630
1631    async fn run_docker_image(
1632        &self,
1633        build_resources: DockerBuildResources,
1634    ) -> Result<DockerInspect, DevContainerError> {
1635        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1636
1637        let output = self
1638            .command_runner
1639            .run_command(&mut docker_run_command)
1640            .await
1641            .map_err(|e| {
1642                log::error!("Error running docker run: {e}");
1643                DevContainerError::CommandFailed(
1644                    docker_run_command.get_program().display().to_string(),
1645                )
1646            })?;
1647
1648        if !output.status.success() {
1649            let std_err = String::from_utf8_lossy(&output.stderr);
1650            log::error!("Non-success status from docker run. StdErr: {std_err}");
1651            return Err(DevContainerError::CommandFailed(
1652                docker_run_command.get_program().display().to_string(),
1653            ));
1654        }
1655
1656        log::debug!("Checking for container that was started");
1657        let Some(docker_ps) = self.check_for_existing_container().await? else {
1658            log::error!("Could not locate container just created");
1659            return Err(DevContainerError::DevContainerParseFailed);
1660        };
1661        self.docker_client.inspect(&docker_ps.id).await
1662    }
1663
    /// The host-side project directory as a display string (used as the
    /// workspace mount source and as a project-name fallback).
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1667    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1668        self.local_project_directory
1669            .file_name()
1670            .map(|f| f.display().to_string())
1671            .ok_or(DevContainerError::DevContainerParseFailed)
1672    }
1673
1674    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1675        self.dev_container()
1676            .workspace_folder
1677            .as_ref()
1678            .map(|folder| PathBuf::from(folder))
1679            .or(Some(
1680                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1681            ))
1682            .ok_or(DevContainerError::DevContainerParseFailed)
1683    }
1684    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1685        self.remote_workspace_folder().and_then(|f| {
1686            f.file_name()
1687                .map(|file_name| file_name.display().to_string())
1688                .ok_or(DevContainerError::DevContainerParseFailed)
1689        })
1690    }
1691
1692    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1693        if let Some(mount) = &self.dev_container().workspace_mount {
1694            return Ok(mount.clone());
1695        }
1696        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1697            return Err(DevContainerError::DevContainerParseFailed);
1698        };
1699
1700        Ok(MountDefinition {
1701            source: self.local_workspace_folder(),
1702            target: format!("/workspaces/{}", project_directory_name.display()),
1703            mount_type: None,
1704        })
1705    }
1706
1707    fn create_docker_run_command(
1708        &self,
1709        build_resources: DockerBuildResources,
1710    ) -> Result<Command, DevContainerError> {
1711        let remote_workspace_mount = self.remote_workspace_mount()?;
1712
1713        let docker_cli = self.docker_client.docker_cli();
1714        let mut command = Command::new(&docker_cli);
1715
1716        command.arg("run");
1717
1718        if build_resources.privileged {
1719            command.arg("--privileged");
1720        }
1721
1722        if &docker_cli == "podman" {
1723            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
1724        }
1725
1726        command.arg("--sig-proxy=false");
1727        command.arg("-d");
1728        command.arg("--mount");
1729        command.arg(remote_workspace_mount.to_string());
1730
1731        for mount in &build_resources.additional_mounts {
1732            command.arg("--mount");
1733            command.arg(mount.to_string());
1734        }
1735
1736        for (key, val) in self.identifying_labels() {
1737            command.arg("-l");
1738            command.arg(format!("{}={}", key, val));
1739        }
1740
1741        if let Some(metadata) = &build_resources.image.config.labels.metadata {
1742            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1743                log::error!("Problem serializing image metadata: {e}");
1744                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
1745            })?;
1746            command.arg("-l");
1747            command.arg(format!(
1748                "{}={}",
1749                "devcontainer.metadata", serialized_metadata
1750            ));
1751        }
1752
1753        if let Some(forward_ports) = &self.dev_container().forward_ports {
1754            for port in forward_ports {
1755                if let ForwardPort::Number(port_number) = port {
1756                    command.arg("-p");
1757                    command.arg(format!("{port_number}:{port_number}"));
1758                }
1759            }
1760        }
1761        if let Some(app_port) = &self.dev_container().app_port {
1762            command.arg("-p");
1763            command.arg(format!("{app_port}:{app_port}"));
1764        }
1765
1766        command.arg("--entrypoint");
1767        command.arg("/bin/sh");
1768        command.arg(&build_resources.image.id);
1769        command.arg("-c");
1770
1771        command.arg(build_resources.entrypoint_script);
1772        command.arg("-");
1773
1774        Ok(command)
1775    }
1776
1777    fn extension_ids(&self) -> Vec<String> {
1778        self.dev_container()
1779            .customizations
1780            .as_ref()
1781            .map(|c| c.zed.extensions.clone())
1782            .unwrap_or_default()
1783    }
1784
    /// Cold-start path: run host-side hooks, fetch feature/Dockerfile
    /// resources, build, start the container, then run the in-container
    /// lifecycle scripts.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        // initializeCommand runs on the host before anything is built.
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: fresh container, so the create-time hooks run as well as
        // the attach-time hook.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1798
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// `docker exec`.
    ///
    /// When `new_container` is true, the create-time hooks run first, in
    /// order: `onCreateCommand`, `updateContentCommand`, `postCreateCommand`,
    /// `postStartCommand`. `postAttachCommand` runs in every case.
    ///
    /// Note the executing users: onCreate/updateContent run as `"root"`,
    /// while postCreate/postStart/postAttach run as the resolved remote user.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        // Scripts may reference expanded variables, so a parsed config is required.
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // Every hook executes with the remote workspace folder as its cwd.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // From here on the hooks run as the remote user, not root.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand runs on both new and reused containers.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1886
1887    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1888        let ConfigStatus::VariableParsed(config) = &self.config else {
1889            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1890            return Err(DevContainerError::DevContainerParseFailed);
1891        };
1892
1893        if let Some(initialize_command) = &config.initialize_command {
1894            log::debug!("Running initialize command");
1895            initialize_command
1896                .run(&self.command_runner, &self.local_project_directory)
1897                .await
1898        } else {
1899            log::warn!("No initialize command found");
1900            Ok(())
1901        }
1902    }
1903
1904    async fn check_for_existing_devcontainer(
1905        &self,
1906    ) -> Result<Option<DevContainerUp>, DevContainerError> {
1907        if let Some(docker_ps) = self.check_for_existing_container().await? {
1908            log::debug!("Dev container already found. Proceeding with it");
1909
1910            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1911
1912            if !docker_inspect.is_running() {
1913                log::debug!("Container not running. Will attempt to start, and then proceed");
1914                self.docker_client.start_container(&docker_ps.id).await?;
1915            }
1916
1917            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1918
1919            let remote_folder = get_remote_dir_from_config(
1920                &docker_inspect,
1921                (&self.local_project_directory.display()).to_string(),
1922            )?;
1923
1924            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1925
1926            let dev_container_up = DevContainerUp {
1927                container_id: docker_ps.id,
1928                remote_user: remote_user,
1929                remote_workspace_folder: remote_folder,
1930                extension_ids: self.extension_ids(),
1931                remote_env,
1932            };
1933
1934            self.run_remote_scripts(&dev_container_up, false).await?;
1935
1936            Ok(Some(dev_container_up))
1937        } else {
1938            log::debug!("Existing container not found.");
1939
1940            Ok(None)
1941        }
1942    }
1943
1944    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1945        self.docker_client
1946            .find_process_by_filters(
1947                self.identifying_labels()
1948                    .iter()
1949                    .map(|(k, v)| format!("label={k}={v}"))
1950                    .collect(),
1951            )
1952            .await
1953    }
1954
1955    fn project_name(&self) -> String {
1956        if let Some(name) = &self.dev_container().name {
1957            safe_id_lower(name)
1958        } else {
1959            let alternate_name = &self
1960                .local_workspace_base_name()
1961                .unwrap_or(self.local_workspace_folder());
1962            safe_id_lower(alternate_name)
1963        }
1964    }
1965}
1966
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context
    /// named `dev_containers_feature_content_source`)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context, keeping
    /// unrelated project files out of the build when no Dockerfile is involved
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// When `None`, the build falls back to the
    /// `dev_container_auto_added_stage_label` stage as its base.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
1985
1986pub(crate) async fn read_devcontainer_configuration(
1987    config: DevContainerConfig,
1988    context: &DevContainerContext,
1989    environment: HashMap<String, String>,
1990) -> Result<DevContainer, DevContainerError> {
1991    let docker = if context.use_podman {
1992        Docker::new("podman")
1993    } else {
1994        Docker::new("docker")
1995    };
1996    let mut dev_container = DevContainerManifest::new(
1997        context,
1998        environment,
1999        Arc::new(docker),
2000        Arc::new(DefaultCommandRunner::new()),
2001        config,
2002        &context.project_directory.as_ref(),
2003    )
2004    .await?;
2005    dev_container.parse_nonremote_vars()?;
2006    Ok(dev_container.dev_container().clone())
2007}
2008
2009pub(crate) async fn spawn_dev_container(
2010    context: &DevContainerContext,
2011    environment: HashMap<String, String>,
2012    config: DevContainerConfig,
2013    local_project_path: &Path,
2014) -> Result<DevContainerUp, DevContainerError> {
2015    let docker = if context.use_podman {
2016        Docker::new("podman")
2017    } else {
2018        Docker::new("docker")
2019    };
2020    let mut devcontainer_manifest = DevContainerManifest::new(
2021        context,
2022        environment,
2023        Arc::new(docker),
2024        Arc::new(DefaultCommandRunner::new()),
2025        config,
2026        local_project_path,
2027    )
2028    .await?;
2029
2030    devcontainer_manifest.parse_nonremote_vars()?;
2031
2032    log::debug!("Checking for existing container");
2033    if let Some(devcontainer) = devcontainer_manifest
2034        .check_for_existing_devcontainer()
2035        .await?
2036    {
2037        Ok(devcontainer)
2038    } else {
2039        log::debug!("Existing container not found. Building");
2040
2041        devcontainer_manifest.build_and_run().await
2042    }
2043}
2044
/// Everything needed to `docker run` a built dev container image.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect data of the image to start.
    image: DockerInspect,
    // Mounts beyond the workspace mount itself.
    additional_mounts: Vec<MountDefinition>,
    // Whether the container is started with `--privileged`.
    privileged: bool,
    // Shell script passed to `/bin/sh -c` in place of the image entrypoint.
    entrypoint_script: String,
}
2052
/// The two ways a dev container can be brought up: a docker-compose project,
/// or a plain `docker run` of a (possibly feature-extended) image.
#[derive(Debug)]
enum DevContainerBuildResources {
    // Start via `docker compose up` with the given compose files/config.
    DockerCompose(DockerComposeResources),
    // Start via `docker run` from a built image.
    Docker(DockerBuildResources),
}
2058
2059fn find_primary_service(
2060    docker_compose: &DockerComposeResources,
2061    devcontainer: &DevContainerManifest,
2062) -> Result<(String, DockerComposeService), DevContainerError> {
2063    let Some(service_name) = &devcontainer.dev_container().service else {
2064        return Err(DevContainerError::DevContainerParseFailed);
2065    };
2066
2067    match docker_compose.config.services.get(service_name) {
2068        Some(service) => Ok((service_name.clone(), service.clone())),
2069        None => Err(DevContainerError::DevContainerParseFailed),
2070    }
2071}
2072
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// (Distinct from the transient `/tmp/build-features/` directory the
/// feature-content image copies into.)
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2076
/// Escapes regex special characters in a string by prefixing each with `\`.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, ch| {
            if ".*+?^${}()|[]\\".contains(ch) {
                escaped.push('\\');
            }
            escaped.push(ch);
            escaped
        })
}
2088
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // First strip a digest (`@sha256:…`) or, failing that, a tag (`:1`) —
    // a tag colon only counts when it appears after the last slash.
    let base = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // Keep only the path segment after the final slash.
    base.rsplit('/').next().unwrap_or(base)
}
2112
2113/// Generates a shell command that looks up a user's passwd entry.
2114///
2115/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2116/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2117fn get_ent_passwd_shell_command(user: &str) -> String {
2118    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2119    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2120    format!(
2121        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2122        shell = escaped_for_shell,
2123        re = escaped_for_regex,
2124    )
2125}
2126
2127/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2128///
2129/// Features listed in the override come first (in the specified order), followed
2130/// by any remaining features sorted lexicographically by their full reference ID.
2131fn resolve_feature_order<'a>(
2132    features: &'a HashMap<String, FeatureOptions>,
2133    override_order: &Option<Vec<String>>,
2134) -> Vec<(&'a String, &'a FeatureOptions)> {
2135    if let Some(order) = override_order {
2136        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2137        for ordered_id in order {
2138            if let Some((key, options)) = features.get_key_value(ordered_id) {
2139                ordered.push((key, options));
2140            }
2141        }
2142        let mut remaining: Vec<_> = features
2143            .iter()
2144            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2145            .collect();
2146        remaining.sort_by_key(|(id, _)| id.as_str());
2147        ordered.extend(remaining);
2148        ordered
2149    } else {
2150        let mut entries: Vec<_> = features.iter().collect();
2151        entries.sort_by_key(|(id, _)| id.as_str());
2152        entries
2153    }
2154}
2155
2156/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
2157///
2158/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
2159/// `containerFeaturesConfiguration.ts`.
2160fn generate_install_wrapper(
2161    feature_ref: &str,
2162    feature_id: &str,
2163    env_variables: &str,
2164) -> Result<String, DevContainerError> {
2165    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
2166        log::error!("Error escaping feature ref {feature_ref}: {e}");
2167        DevContainerError::DevContainerParseFailed
2168    })?;
2169    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
2170        log::error!("Error escaping feature {feature_id}: {e}");
2171        DevContainerError::DevContainerParseFailed
2172    })?;
2173    let options_indented: String = env_variables
2174        .lines()
2175        .filter(|l| !l.is_empty())
2176        .map(|l| format!("    {}", l))
2177        .collect::<Vec<_>>()
2178        .join("\n");
2179    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
2180        log::error!("Error escaping options {options_indented}: {e}");
2181        DevContainerError::DevContainerParseFailed
2182    })?;
2183
2184    let script = format!(
2185        r#"#!/bin/sh
2186set -e
2187
2188on_exit () {{
2189    [ $? -eq 0 ] && exit
2190    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
2191}}
2192
2193trap on_exit EXIT
2194
2195echo ===========================================================================
2196echo 'Feature       : {escaped_name}'
2197echo 'Id            : {escaped_id}'
2198echo 'Options       :'
2199echo {escaped_options}
2200echo ===========================================================================
2201
2202set -a
2203. ../devcontainer-features.builtin.env
2204. ./devcontainer-features.env
2205set +a
2206
2207chmod +x ./install.sh
2208./install.sh
2209"#
2210    );
2211
2212    Ok(script)
2213}
2214
// Dockerfile actions need to be moved to their own file
/// Returns the stage alias of the first `FROM` instruction
/// (`FROM image AS alias`), if one is present.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    let from_line = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))?;
    // `split_whitespace` tolerates tabs, repeated spaces, and trailing
    // whitespace — all valid in a Dockerfile — where the previous
    // `split(" ")` silently missed the alias (e.g. `FROM ubuntu AS base `).
    let words: Vec<&str> = from_line.split_whitespace().collect();
    if words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as") {
        Some(words[words.len() - 1].to_string())
    } else {
        None
    }
}
2229
2230fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2231    if dockerfile_alias(dockerfile_content).is_some() {
2232        dockerfile_content.to_string()
2233    } else {
2234        dockerfile_content
2235            .lines()
2236            .map(|line| {
2237                if line.starts_with("FROM") {
2238                    format!("{} AS {}", line, alias)
2239                } else {
2240                    line.to_string()
2241                }
2242            })
2243            .collect::<Vec<String>>()
2244            .join("\n")
2245    }
2246}
2247
2248fn image_from_dockerfile(
2249    devcontainer: &DevContainerManifest,
2250    dockerfile_contents: String,
2251) -> Result<String, DevContainerError> {
2252    let mut raw_contents = dockerfile_contents
2253        .lines()
2254        .find(|line| line.starts_with("FROM"))
2255        .and_then(|from_line| {
2256            from_line
2257                .split(' ')
2258                .collect::<Vec<&str>>()
2259                .get(1)
2260                .map(|s| s.to_string())
2261        })
2262        .ok_or_else(|| {
2263            log::error!("Could not find an image definition in dockerfile");
2264            DevContainerError::DevContainerParseFailed
2265        })?;
2266
2267    for (k, v) in devcontainer
2268        .dev_container()
2269        .build
2270        .as_ref()
2271        .and_then(|b| b.args.as_ref())
2272        .unwrap_or(&HashMap::new())
2273    {
2274        raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2275    }
2276    Ok(raw_contents)
2277}
2278
2279// Container user things
2280// This should come from spec - see the docs
2281fn get_remote_user_from_config(
2282    docker_config: &DockerInspect,
2283    devcontainer: &DevContainerManifest,
2284) -> Result<String, DevContainerError> {
2285    if let DevContainer {
2286        remote_user: Some(user),
2287        ..
2288    } = &devcontainer.dev_container()
2289    {
2290        return Ok(user.clone());
2291    }
2292    if let Some(metadata) = &docker_config.config.labels.metadata {
2293        for metadatum in metadata {
2294            if let Some(remote_user) = metadatum.get("remoteUser") {
2295                if let Some(remote_user_str) = remote_user.as_str() {
2296                    return Ok(remote_user_str.to_string());
2297                }
2298            }
2299        }
2300    }
2301    if let Some(image_user) = &docker_config.config.image_user {
2302        if !image_user.is_empty() {
2303            return Ok(image_user.to_string());
2304        }
2305    }
2306    Ok("root".to_string())
2307}
2308
// Resolves the container user per the Dev Container specification's
// precedence: containerUser property, image metadata label, image USER, root.
2310fn get_container_user_from_config(
2311    docker_config: &DockerInspect,
2312    devcontainer: &DevContainerManifest,
2313) -> Result<String, DevContainerError> {
2314    if let Some(user) = &devcontainer.dev_container().container_user {
2315        return Ok(user.to_string());
2316    }
2317    if let Some(metadata) = &docker_config.config.labels.metadata {
2318        for metadatum in metadata {
2319            if let Some(container_user) = metadatum.get("containerUser") {
2320                if let Some(container_user_str) = container_user.as_str() {
2321                    return Ok(container_user_str.to_string());
2322                }
2323            }
2324        }
2325    }
2326    if let Some(image_user) = &docker_config.config.image_user {
2327        return Ok(image_user.to_string());
2328    }
2329
2330    Ok("root".to_string())
2331}
2332
2333#[cfg(test)]
2334mod test {
2335    use std::{
2336        collections::HashMap,
2337        ffi::OsStr,
2338        path::PathBuf,
2339        process::{ExitStatus, Output},
2340        sync::{Arc, Mutex},
2341    };
2342
2343    use async_trait::async_trait;
2344    use fs::{FakeFs, Fs};
2345    use gpui::{AppContext, TestAppContext};
2346    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2347    use project::{
2348        ProjectEnvironment,
2349        worktree_store::{WorktreeIdCounter, WorktreeStore},
2350    };
2351    use serde_json_lenient::Value;
2352    use util::{command::Command, paths::SanitizedPath};
2353
2354    use crate::{
2355        DevContainerConfig, DevContainerContext,
2356        command_json::CommandRunner,
2357        devcontainer_api::DevContainerError,
2358        devcontainer_json::MountDefinition,
2359        devcontainer_manifest::{
2360            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2361            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2362        },
2363        docker::{
2364            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2365            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2366            DockerPs,
2367        },
2368        oci::TokenResponse,
2369    };
    // Absolute path of the fixture project directory shared by all tests.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2371
2372    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2373        let buffer = futures::io::Cursor::new(Vec::new());
2374        let mut builder = async_tar::Builder::new(buffer);
2375        for (file_name, content) in content {
2376            if content.is_empty() {
2377                let mut header = async_tar::Header::new_gnu();
2378                header.set_size(0);
2379                header.set_mode(0o755);
2380                header.set_entry_type(async_tar::EntryType::Directory);
2381                header.set_cksum();
2382                builder
2383                    .append_data(&mut header, file_name, &[] as &[u8])
2384                    .await
2385                    .unwrap();
2386            } else {
2387                let data = content.as_bytes();
2388                let mut header = async_tar::Header::new_gnu();
2389                header.set_size(data.len() as u64);
2390                header.set_mode(0o755);
2391                header.set_entry_type(async_tar::EntryType::Regular);
2392                header.set_cksum();
2393                builder
2394                    .append_data(&mut header, file_name, data)
2395                    .await
2396                    .unwrap();
2397            }
2398        }
2399        let buffer = builder.into_inner().await.unwrap();
2400        buffer.into_inner()
2401    }
2402
2403    fn test_project_filename() -> String {
2404        PathBuf::from(TEST_PROJECT_PATH)
2405            .file_name()
2406            .expect("is valid")
2407            .display()
2408            .to_string()
2409    }
2410
2411    async fn init_devcontainer_config(
2412        fs: &Arc<FakeFs>,
2413        devcontainer_contents: &str,
2414    ) -> DevContainerConfig {
2415        fs.insert_tree(
2416            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2417            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2418        )
2419        .await;
2420
2421        DevContainerConfig::default_config()
2422    }
2423
    /// Handles to the fakes backing a manifest under test, kept so individual
    /// tests can inspect or seed their state after construction.
    struct TestDependencies {
        // Fake filesystem holding the project tree and any generated files.
        fs: Arc<FakeFs>,
        // Kept alive alongside the manifest; not read directly by tests.
        _http_client: Arc<dyn HttpClient>,
        // Fake docker client handed to the manifest.
        docker: Arc<FakeDocker>,
        // Fake command runner handed to the manifest.
        command_runner: Arc<TestCommandRunner>,
    }
2430
2431    async fn init_default_devcontainer_manifest(
2432        cx: &mut TestAppContext,
2433        devcontainer_contents: &str,
2434    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2435        let fs = FakeFs::new(cx.executor());
2436        let http_client = fake_http_client();
2437        let command_runner = Arc::new(TestCommandRunner::new());
2438        let docker = Arc::new(FakeDocker::new());
2439        let environment = HashMap::new();
2440
2441        init_devcontainer_manifest(
2442            cx,
2443            fs,
2444            http_client,
2445            docker,
2446            command_runner,
2447            environment,
2448            devcontainer_contents,
2449        )
2450        .await
2451    }
2452
    /// Builds a `DevContainerManifest` against the supplied fakes and returns
    /// both the manifest and handles to those fakes for later inspection.
    ///
    /// Writes `devcontainer_contents` into the fake filesystem, wires up a
    /// local worktree store plus project environment, and constructs the
    /// manifest rooted at `TEST_PROJECT_PATH`.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Seed the fake filesystem with the devcontainer.json under test.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Minimal project plumbing: a local worktree store and a project
        // environment entity wired to it.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep handles to the fakes so callers can inspect them afterwards.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2495
2496    #[gpui::test]
2497    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
2498        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2499            cx,
2500            r#"
2501// These are some external comments. serde_lenient should handle them
2502{
2503    // These are some internal comments
2504    "image": "image",
2505    "remoteUser": "root",
2506}
2507            "#,
2508        )
2509        .await
2510        .unwrap();
2511
2512        let mut metadata = HashMap::new();
2513        metadata.insert(
2514            "remoteUser".to_string(),
2515            serde_json_lenient::Value::String("vsCode".to_string()),
2516        );
2517        let given_docker_config = DockerInspect {
2518            id: "docker_id".to_string(),
2519            config: DockerInspectConfig {
2520                labels: DockerConfigLabels {
2521                    metadata: Some(vec![metadata]),
2522                },
2523                image_user: None,
2524                env: Vec::new(),
2525            },
2526            mounts: None,
2527            state: None,
2528        };
2529
2530        let remote_user =
2531            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();
2532
2533        assert_eq!(remote_user, "root".to_string())
2534    }
2535
2536    #[gpui::test]
2537    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
2538        let (_, devcontainer_manifest) =
2539            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2540        let mut metadata = HashMap::new();
2541        metadata.insert(
2542            "remoteUser".to_string(),
2543            serde_json_lenient::Value::String("vsCode".to_string()),
2544        );
2545        let given_docker_config = DockerInspect {
2546            id: "docker_id".to_string(),
2547            config: DockerInspectConfig {
2548                labels: DockerConfigLabels {
2549                    metadata: Some(vec![metadata]),
2550                },
2551                image_user: None,
2552                env: Vec::new(),
2553            },
2554            mounts: None,
2555            state: None,
2556        };
2557
2558        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);
2559
2560        assert!(remote_user.is_ok());
2561        let remote_user = remote_user.expect("ok");
2562        assert_eq!(&remote_user, "vsCode")
2563    }
2564
2565    #[test]
2566    fn should_extract_feature_id_from_references() {
2567        assert_eq!(
2568            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2569            "aws-cli"
2570        );
2571        assert_eq!(
2572            extract_feature_id("ghcr.io/devcontainers/features/go"),
2573            "go"
2574        );
2575        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2576        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2577        assert_eq!(
2578            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2579            "rust"
2580        );
2581    }
2582
2583    #[gpui::test]
2584    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2585        let mut metadata = HashMap::new();
2586        metadata.insert(
2587            "remoteUser".to_string(),
2588            serde_json_lenient::Value::String("vsCode".to_string()),
2589        );
2590
2591        let (_, devcontainer_manifest) =
2592            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2593        let build_resources = DockerBuildResources {
2594            image: DockerInspect {
2595                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2596                config: DockerInspectConfig {
2597                    labels: DockerConfigLabels { metadata: None },
2598                    image_user: None,
2599                    env: Vec::new(),
2600                },
2601                mounts: None,
2602                state: None,
2603            },
2604            additional_mounts: vec![],
2605            privileged: false,
2606            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
2607        };
2608        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2609
2610        assert!(docker_run_command.is_ok());
2611        let docker_run_command = docker_run_command.expect("ok");
2612
2613        assert_eq!(docker_run_command.get_program(), "docker");
2614        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2615            .join(".devcontainer")
2616            .join("devcontainer.json");
2617        let expected_config_file_label = expected_config_file_label.display();
2618        assert_eq!(
2619            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2620            vec![
2621                OsStr::new("run"),
2622                OsStr::new("--sig-proxy=false"),
2623                OsStr::new("-d"),
2624                OsStr::new("--mount"),
2625                OsStr::new(
2626                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2627                ),
2628                OsStr::new("-l"),
2629                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2630                OsStr::new("-l"),
2631                OsStr::new(&format!(
2632                    "devcontainer.config_file={expected_config_file_label}"
2633                )),
2634                OsStr::new("--entrypoint"),
2635                OsStr::new("/bin/sh"),
2636                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2637                OsStr::new("-c"),
2638                OsStr::new(
2639                    "
2640    echo Container started
2641    trap \"exit 0\" 15
2642    exec \"$@\"
2643    while sleep 1 & wait $!; do :; done
2644                        "
2645                    .trim()
2646                ),
2647                OsStr::new("-"),
2648            ]
2649        )
2650    }
2651
2652    #[gpui::test]
2653    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
2654        // State where service not defined in dev container
2655        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2656        let given_docker_compose_config = DockerComposeResources {
2657            config: DockerComposeConfig {
2658                name: Some("devcontainers".to_string()),
2659                services: HashMap::new(),
2660                ..Default::default()
2661            },
2662            ..Default::default()
2663        };
2664
2665        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2666
2667        assert!(bad_result.is_err());
2668
2669        // State where service defined in devcontainer, not found in DockerCompose config
2670        let (_, given_dev_container) =
2671            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
2672                .await
2673                .unwrap();
2674        let given_docker_compose_config = DockerComposeResources {
2675            config: DockerComposeConfig {
2676                name: Some("devcontainers".to_string()),
2677                services: HashMap::new(),
2678                ..Default::default()
2679            },
2680            ..Default::default()
2681        };
2682
2683        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2684
2685        assert!(bad_result.is_err());
2686        // State where service defined in devcontainer and in DockerCompose config
2687
2688        let (_, given_dev_container) =
2689            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
2690                .await
2691                .unwrap();
2692        let given_docker_compose_config = DockerComposeResources {
2693            config: DockerComposeConfig {
2694                name: Some("devcontainers".to_string()),
2695                services: HashMap::from([(
2696                    "found_service".to_string(),
2697                    DockerComposeService {
2698                        ..Default::default()
2699                    },
2700                )]),
2701                ..Default::default()
2702            },
2703            ..Default::default()
2704        };
2705
2706        let (service_name, _) =
2707            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();
2708
2709        assert_eq!(service_name, "found_service".to_string());
2710    }
2711
2712    #[gpui::test]
2713    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
2714        let fs = FakeFs::new(cx.executor());
2715        let given_devcontainer_contents = r#"
2716// These are some external comments. serde_lenient should handle them
2717{
2718    // These are some internal comments
2719    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
2720    "name": "myDevContainer-${devcontainerId}",
2721    "remoteUser": "root",
2722    "remoteEnv": {
2723        "DEVCONTAINER_ID": "${devcontainerId}",
2724        "MYVAR2": "myvarothervalue",
2725        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
2726        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
2727        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
2728        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
2729        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
2730        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"
2731
2732    }
2733}
2734                    "#;
2735        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
2736            cx,
2737            fs,
2738            fake_http_client(),
2739            Arc::new(FakeDocker::new()),
2740            Arc::new(TestCommandRunner::new()),
2741            HashMap::from([
2742                ("local_env_1".to_string(), "local_env_value1".to_string()),
2743                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
2744            ]),
2745            given_devcontainer_contents,
2746        )
2747        .await
2748        .unwrap();
2749
2750        devcontainer_manifest.parse_nonremote_vars().unwrap();
2751
2752        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
2753            &devcontainer_manifest.config
2754        else {
2755            panic!("Config not parsed");
2756        };
2757
2758        // ${devcontainerId}
2759        let devcontainer_id = devcontainer_manifest.devcontainer_id();
2760        assert_eq!(
2761            variable_replaced_devcontainer.name,
2762            Some(format!("myDevContainer-{devcontainer_id}"))
2763        );
2764        assert_eq!(
2765            variable_replaced_devcontainer
2766                .remote_env
2767                .as_ref()
2768                .and_then(|env| env.get("DEVCONTAINER_ID")),
2769            Some(&devcontainer_id)
2770        );
2771
2772        // ${containerWorkspaceFolderBasename}
2773        assert_eq!(
2774            variable_replaced_devcontainer
2775                .remote_env
2776                .as_ref()
2777                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
2778            Some(&test_project_filename())
2779        );
2780
2781        // ${localWorkspaceFolderBasename}
2782        assert_eq!(
2783            variable_replaced_devcontainer
2784                .remote_env
2785                .as_ref()
2786                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
2787            Some(&test_project_filename())
2788        );
2789
2790        // ${containerWorkspaceFolder}
2791        assert_eq!(
2792            variable_replaced_devcontainer
2793                .remote_env
2794                .as_ref()
2795                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
2796            Some(&format!("/workspaces/{}", test_project_filename()))
2797        );
2798
2799        // ${localWorkspaceFolder}
2800        assert_eq!(
2801            variable_replaced_devcontainer
2802                .remote_env
2803                .as_ref()
2804                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
2805            Some(&TEST_PROJECT_PATH.to_string())
2806        );
2807
2808        // ${localEnv:VARIABLE_NAME}
2809        assert_eq!(
2810            variable_replaced_devcontainer
2811                .remote_env
2812                .as_ref()
2813                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
2814            Some(&"local_env_value1".to_string())
2815        );
2816        assert_eq!(
2817            variable_replaced_devcontainer
2818                .remote_env
2819                .as_ref()
2820                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
2821            Some(&"THISVALUEHERE".to_string())
2822        );
2823    }
2824
2825    #[gpui::test]
2826    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
2827        let given_devcontainer_contents = r#"
2828                // These are some external comments. serde_lenient should handle them
2829                {
2830                    // These are some internal comments
2831                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
2832                    "name": "myDevContainer-${devcontainerId}",
2833                    "remoteUser": "root",
2834                    "remoteEnv": {
2835                        "DEVCONTAINER_ID": "${devcontainerId}",
2836                        "MYVAR2": "myvarothervalue",
2837                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
2838                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
2839                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
2840                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"
2841
2842                    },
2843                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
2844                    "workspaceFolder": "/workspace/customfolder"
2845                }
2846            "#;
2847
2848        let (_, mut devcontainer_manifest) =
2849            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
2850                .await
2851                .unwrap();
2852
2853        devcontainer_manifest.parse_nonremote_vars().unwrap();
2854
2855        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
2856            &devcontainer_manifest.config
2857        else {
2858            panic!("Config not parsed");
2859        };
2860
2861        // ${devcontainerId}
2862        let devcontainer_id = devcontainer_manifest.devcontainer_id();
2863        assert_eq!(
2864            variable_replaced_devcontainer.name,
2865            Some(format!("myDevContainer-{devcontainer_id}"))
2866        );
2867        assert_eq!(
2868            variable_replaced_devcontainer
2869                .remote_env
2870                .as_ref()
2871                .and_then(|env| env.get("DEVCONTAINER_ID")),
2872            Some(&devcontainer_id)
2873        );
2874
2875        // ${containerWorkspaceFolderBasename}
2876        assert_eq!(
2877            variable_replaced_devcontainer
2878                .remote_env
2879                .as_ref()
2880                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
2881            Some(&"customfolder".to_string())
2882        );
2883
2884        // ${localWorkspaceFolderBasename}
2885        assert_eq!(
2886            variable_replaced_devcontainer
2887                .remote_env
2888                .as_ref()
2889                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
2890            Some(&"project".to_string())
2891        );
2892
2893        // ${containerWorkspaceFolder}
2894        assert_eq!(
2895            variable_replaced_devcontainer
2896                .remote_env
2897                .as_ref()
2898                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
2899            Some(&"/workspace/customfolder".to_string())
2900        );
2901
2902        // ${localWorkspaceFolder}
2903        assert_eq!(
2904            variable_replaced_devcontainer
2905                .remote_env
2906                .as_ref()
2907                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
2908            Some(&TEST_PROJECT_PATH.to_string())
2909        );
2910    }
2911
2912    // updateRemoteUserUID is treated as false in Windows, so this test will fail
2913    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2914    #[cfg(not(target_os = "windows"))]
2915    #[gpui::test]
2916    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2917        cx.executor().allow_parking();
2918        env_logger::try_init().ok();
2919        let given_devcontainer_contents = r#"
2920            /*---------------------------------------------------------------------------------------------
2921             *  Copyright (c) Microsoft Corporation. All rights reserved.
2922             *  Licensed under the MIT License. See License.txt in the project root for license information.
2923             *--------------------------------------------------------------------------------------------*/
2924            {
2925              "name": "cli-${devcontainerId}",
2926              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2927              "build": {
2928                "dockerfile": "Dockerfile",
2929                "args": {
2930                  "VARIANT": "18-bookworm",
2931                  "FOO": "bar",
2932                },
2933              },
2934              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2935              "workspaceFolder": "/workspace2",
2936              "mounts": [
2937                // Keep command history across instances
2938                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2939              ],
2940
2941              "forwardPorts": [
2942                8082,
2943                8083,
2944              ],
2945              "appPort": "8084",
2946
2947              "containerEnv": {
2948                "VARIABLE_VALUE": "value",
2949              },
2950
2951              "initializeCommand": "touch IAM.md",
2952
2953              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
2954
2955              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
2956
2957              "postCreateCommand": {
2958                "yarn": "yarn install",
2959                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2960              },
2961
2962              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2963
2964              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
2965
2966              "remoteUser": "node",
2967
2968              "remoteEnv": {
2969                "PATH": "${containerEnv:PATH}:/some/other/path",
2970                "OTHER_ENV": "other_env_value"
2971              },
2972
2973              "features": {
2974                "ghcr.io/devcontainers/features/docker-in-docker:2": {
2975                  "moby": false,
2976                },
2977                "ghcr.io/devcontainers/features/go:1": {},
2978              },
2979
2980              "customizations": {
2981                "vscode": {
2982                  "extensions": [
2983                    "dbaeumer.vscode-eslint",
2984                    "GitHub.vscode-pull-request-github",
2985                  ],
2986                },
2987                "zed": {
2988                  "extensions": ["vue", "ruby"],
2989                },
2990                "codespaces": {
2991                  "repositories": {
2992                    "devcontainers/features": {
2993                      "permissions": {
2994                        "contents": "write",
2995                        "workflows": "write",
2996                      },
2997                    },
2998                  },
2999                },
3000              },
3001            }
3002            "#;
3003
3004        let (test_dependencies, mut devcontainer_manifest) =
3005            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3006                .await
3007                .unwrap();
3008
3009        test_dependencies
3010            .fs
3011            .atomic_write(
3012                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3013                r#"
3014#  Copyright (c) Microsoft Corporation. All rights reserved.
3015#  Licensed under the MIT License. See License.txt in the project root for license information.
3016ARG VARIANT="16-bullseye"
3017FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3018
3019RUN mkdir -p /workspaces && chown node:node /workspaces
3020
3021ARG USERNAME=node
3022USER $USERNAME
3023
3024# Save command line history
3025RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3026&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3027&& mkdir -p /home/$USERNAME/commandhistory \
3028&& touch /home/$USERNAME/commandhistory/.bash_history \
3029&& chown -R $USERNAME /home/$USERNAME/commandhistory
3030                    "#.trim().to_string(),
3031            )
3032            .await
3033            .unwrap();
3034
3035        devcontainer_manifest.parse_nonremote_vars().unwrap();
3036
3037        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3038
3039        assert_eq!(
3040            devcontainer_up.extension_ids,
3041            vec!["vue".to_string(), "ruby".to_string()]
3042        );
3043
3044        let files = test_dependencies.fs.files();
3045        let feature_dockerfile = files
3046            .iter()
3047            .find(|f| {
3048                f.file_name()
3049                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3050            })
3051            .expect("to be found");
3052        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3053        assert_eq!(
3054            &feature_dockerfile,
3055            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3056
3057#  Copyright (c) Microsoft Corporation. All rights reserved.
3058#  Licensed under the MIT License. See License.txt in the project root for license information.
3059ARG VARIANT="16-bullseye"
3060FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3061
3062RUN mkdir -p /workspaces && chown node:node /workspaces
3063
3064ARG USERNAME=node
3065USER $USERNAME
3066
3067# Save command line history
3068RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3069&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3070&& mkdir -p /home/$USERNAME/commandhistory \
3071&& touch /home/$USERNAME/commandhistory/.bash_history \
3072&& chown -R $USERNAME /home/$USERNAME/commandhistory
3073
3074FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3075USER root
3076COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3077RUN chmod -R 0755 /tmp/build-features/
3078
3079FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3080
3081USER root
3082
3083RUN mkdir -p /tmp/dev-container-features
3084COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3085
3086RUN \
3087echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3088echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3089
3090
3091RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3092cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3093&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3094&& cd /tmp/dev-container-features/docker-in-docker_0 \
3095&& chmod +x ./devcontainer-features-install.sh \
3096&& ./devcontainer-features-install.sh \
3097&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3098
3099RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3100cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3101&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3102&& cd /tmp/dev-container-features/go_1 \
3103&& chmod +x ./devcontainer-features-install.sh \
3104&& ./devcontainer-features-install.sh \
3105&& rm -rf /tmp/dev-container-features/go_1
3106
3107
3108ARG _DEV_CONTAINERS_IMAGE_USER=root
3109USER $_DEV_CONTAINERS_IMAGE_USER
3110"#
3111        );
3112
3113        let uid_dockerfile = files
3114            .iter()
3115            .find(|f| {
3116                f.file_name()
3117                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3118            })
3119            .expect("to be found");
3120        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3121
3122        assert_eq!(
3123            &uid_dockerfile,
3124            r#"ARG BASE_IMAGE
3125FROM $BASE_IMAGE
3126
3127USER root
3128
3129ARG REMOTE_USER
3130ARG NEW_UID
3131ARG NEW_GID
3132SHELL ["/bin/sh", "-c"]
3133RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3134	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3135	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3136	if [ -z "$OLD_UID" ]; then \
3137		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3138	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3139		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3140	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3141		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3142	else \
3143		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3144			FREE_GID=65532; \
3145			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3146			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3147			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3148		fi; \
3149		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3150		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3151		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3152			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3153		fi; \
3154		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3155	fi;
3156
3157ARG IMAGE_USER
3158USER $IMAGE_USER
3159
3160# Ensure that /etc/profile does not clobber the existing path
3161RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3162
3163ENV DOCKER_BUILDKIT=1
3164
3165ENV GOPATH=/go
3166ENV GOROOT=/usr/local/go
3167ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3168ENV VARIABLE_VALUE=value
3169"#
3170        );
3171
3172        let golang_install_wrapper = files
3173            .iter()
3174            .find(|f| {
3175                f.file_name()
3176                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3177                    && f.to_str().is_some_and(|s| s.contains("/go_"))
3178            })
3179            .expect("to be found");
3180        let golang_install_wrapper = test_dependencies
3181            .fs
3182            .load(golang_install_wrapper)
3183            .await
3184            .unwrap();
3185        assert_eq!(
3186            &golang_install_wrapper,
3187            r#"#!/bin/sh
3188set -e
3189
3190on_exit () {
3191    [ $? -eq 0 ] && exit
3192    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3193}
3194
3195trap on_exit EXIT
3196
3197echo ===========================================================================
3198echo 'Feature       : go'
3199echo 'Id            : ghcr.io/devcontainers/features/go:1'
3200echo 'Options       :'
3201echo '    GOLANGCILINTVERSION=latest
3202    VERSION=latest'
3203echo ===========================================================================
3204
3205set -a
3206. ../devcontainer-features.builtin.env
3207. ./devcontainer-features.env
3208set +a
3209
3210chmod +x ./install.sh
3211./install.sh
3212"#
3213        );
3214
3215        let docker_commands = test_dependencies
3216            .command_runner
3217            .commands_by_program("docker");
3218
3219        let docker_run_command = docker_commands
3220            .iter()
3221            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3222            .expect("found");
3223
3224        assert_eq!(
3225            docker_run_command.args,
3226            vec![
3227                "run".to_string(),
3228                "--privileged".to_string(),
3229                "--sig-proxy=false".to_string(),
3230                "-d".to_string(),
3231                "--mount".to_string(),
3232                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3233                "--mount".to_string(),
3234                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3235                "--mount".to_string(),
3236                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3237                "-l".to_string(),
3238                "devcontainer.local_folder=/path/to/local/project".to_string(),
3239                "-l".to_string(),
3240                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3241                "-l".to_string(),
3242                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3243                "-p".to_string(),
3244                "8082:8082".to_string(),
3245                "-p".to_string(),
3246                "8083:8083".to_string(),
3247                "-p".to_string(),
3248                "8084:8084".to_string(),
3249                "--entrypoint".to_string(),
3250                "/bin/sh".to_string(),
3251                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3252                "-c".to_string(),
3253                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3254                "-".to_string()
3255            ]
3256        );
3257
3258        let docker_exec_commands = test_dependencies
3259            .docker
3260            .exec_commands_recorded
3261            .lock()
3262            .unwrap();
3263
3264        assert!(docker_exec_commands.iter().all(|exec| {
3265            exec.env
3266                == HashMap::from([
3267                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3268                    (
3269                        "PATH".to_string(),
3270                        "/initial/path:/some/other/path".to_string(),
3271                    ),
3272                ])
3273        }))
3274    }
3275
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    //
    // End-to-end check of the docker-compose flow: given a devcontainer.json that
    // references a compose file plus two OCI features, `build_and_run` must
    //   1) generate a Dockerfile.extended layering both feature install stages on
    //      top of the service's own Dockerfile,
    //   2) generate an updateUID.Dockerfile (updateRemoteUserUID is not disabled
    //      here — presumably it defaults to on; see the sibling test that sets it
    //      to false),
    //   3) write a docker_compose_runtime.json override that wires the keep-alive
    //      entrypoint, labels, dind volume mount and forwarded/app ports onto the
    //      correct services.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: compose-based config ("dockerComposeFile" +
        // "service") with features, forwarded ports (including "db:" prefixed
        // ones) and an appPort. JSONC comments/trailing commas are intentional —
        // the lenient parser must accept them.
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],
              "appPort": "8084",

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Fixture: the compose file referenced by "dockerComposeFile" above. Note
        // "app" runs with `network_mode: service:db`, which is why the runtime
        // override below publishes every port on the "db" service instead.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Fixture: the Dockerfile built for the "app" service; its contents must
        // reappear verbatim as the first stage of Dockerfile.extended.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        // Substitute local (non-remote) variables such as
        // ${localWorkspaceFolderBasename} before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full build-and-start pipeline against the fakes.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Dockerfile.extended: user Dockerfile first, then the normalize stage,
        // then one feature-install RUN per feature (aws-cli_0, docker-in-docker_1),
        // finally restoring the image user.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateUID.Dockerfile: generated here (unlike the no_update_uid sibling
        // test), remapping the remote user's UID/GID and carrying the trailing
        // /etc/profile PATH fix plus ENV lines.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // Runtime compose override written alongside the user's compose file; it
        // is deserialized and compared structurally rather than as raw text.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        // "app" gets the keep-alive entrypoint, devcontainer labels, privileged
        // mode and the dind volume; every port (forwarded 8083/5432/1234 and
        // appPort 8084) lands on "db" because "app" shares its network
        // (`network_mode: service:db` in the fixture above).
        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            "8083:8083".to_string(),
                            "5432:5432".to_string(),
                            "1234:1234".to_string(),
                            "8084:8084".to_string()
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3565
    // Variant of test_spawns_devcontainer_with_docker_compose with
    // "updateRemoteUserUID": false. The trailing /etc/profile PATH fix and ENV
    // lines are expected directly at the end of Dockerfile.extended here, instead
    // of in a separate updateUID.Dockerfile. No cfg(windows) guard: with UID
    // updating disabled the platform difference noted on the sibling test does
    // not apply.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Same compose-based devcontainer.json fixture as the sibling test, plus
        // "updateRemoteUserUID": false.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          "forwardPorts": [
            8083,
            "db:5432",
            "db:1234",
          ],
          "updateRemoteUserUID": false,
          "appPort": "8084",

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Fixture: the compose file referenced by "dockerComposeFile".
        // NOTE(review): the YAML indentation here is flattened (the keys under
        // `volumes:`/`services:` are not nested) compared with the sibling test's
        // fixture — confirm this is intentional / tolerated by the compose
        // parsing path rather than an accidental paste artifact.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
    build:
        context: .
        dockerfile: Dockerfile
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    volumes:
        - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
        - postgres-data:/var/lib/postgresql/data
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Fixture: the Dockerfile built for the "app" service; it must reappear
        // verbatim as the first stage of Dockerfile.extended.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Substitute local (non-remote) variables before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full build-and-start pipeline against the fakes.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // With updateRemoteUserUID disabled, the PATH-preserving sed and the
        // DOCKER_BUILDKIT ENV are appended to the end of Dockerfile.extended;
        // this is the only generated file asserted in this test.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
3741
3742    #[cfg(not(target_os = "windows"))]
3743    #[gpui::test]
3744    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
3745        cx.executor().allow_parking();
3746        env_logger::try_init().ok();
3747        let given_devcontainer_contents = r#"
3748        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3749        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3750        {
3751          "features": {
3752            "ghcr.io/devcontainers/features/aws-cli:1": {},
3753            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3754          },
3755          "name": "Rust and PostgreSQL",
3756          "dockerComposeFile": "docker-compose.yml",
3757          "service": "app",
3758          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3759
3760          // Features to add to the dev container. More info: https://containers.dev/features.
3761          // "features": {},
3762
3763          // Use 'forwardPorts' to make a list of ports inside the container available locally.
3764          // "forwardPorts": [5432],
3765
3766          // Use 'postCreateCommand' to run commands after the container is created.
3767          // "postCreateCommand": "rustc --version",
3768
3769          // Configure tool-specific properties.
3770          // "customizations": {},
3771
3772          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3773          // "remoteUser": "root"
3774        }
3775        "#;
3776        let mut fake_docker = FakeDocker::new();
3777        fake_docker.set_podman(true);
3778        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
3779            cx,
3780            FakeFs::new(cx.executor()),
3781            fake_http_client(),
3782            Arc::new(fake_docker),
3783            Arc::new(TestCommandRunner::new()),
3784            HashMap::new(),
3785            given_devcontainer_contents,
3786        )
3787        .await
3788        .unwrap();
3789
3790        test_dependencies
3791        .fs
3792        .atomic_write(
3793            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3794            r#"
3795version: '3.8'
3796
3797volumes:
3798postgres-data:
3799
3800services:
3801app:
3802build:
3803    context: .
3804    dockerfile: Dockerfile
3805env_file:
3806    # Ensure that the variables in .env match the same variables in devcontainer.json
3807    - .env
3808
3809volumes:
3810    - ../..:/workspaces:cached
3811
3812# Overrides default command so things don't shut down after the process ends.
3813command: sleep infinity
3814
3815# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3816network_mode: service:db
3817
3818# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3819# (Adding the "ports" property to this file will not forward from a Codespace.)
3820
3821db:
3822image: postgres:14.1
3823restart: unless-stopped
3824volumes:
3825    - postgres-data:/var/lib/postgresql/data
3826env_file:
3827    # Ensure that the variables in .env match the same variables in devcontainer.json
3828    - .env
3829
3830# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3831# (Adding the "ports" property to this file will not forward from a Codespace.)
3832                "#.trim().to_string(),
3833        )
3834        .await
3835        .unwrap();
3836
3837        test_dependencies.fs.atomic_write(
3838        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3839        r#"
3840FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3841
3842# Include lld linker to improve build times either by using environment variable
3843# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3844RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3845&& apt-get -y install clang lld \
3846&& apt-get autoremove -y && apt-get clean -y
3847        "#.trim().to_string()).await.unwrap();
3848
3849        devcontainer_manifest.parse_nonremote_vars().unwrap();
3850
3851        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3852
3853        let files = test_dependencies.fs.files();
3854
3855        let feature_dockerfile = files
3856            .iter()
3857            .find(|f| {
3858                f.file_name()
3859                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3860            })
3861            .expect("to be found");
3862        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3863        assert_eq!(
3864            &feature_dockerfile,
3865            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3866
3867FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3868
3869# Include lld linker to improve build times either by using environment variable
3870# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3871RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3872&& apt-get -y install clang lld \
3873&& apt-get autoremove -y && apt-get clean -y
3874
3875FROM dev_container_feature_content_temp as dev_containers_feature_content_source
3876
3877FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3878USER root
3879COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
3880RUN chmod -R 0755 /tmp/build-features/
3881
3882FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3883
3884USER root
3885
3886RUN mkdir -p /tmp/dev-container-features
3887COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3888
3889RUN \
3890echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3891echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3892
3893
3894COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
3895RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3896&& cd /tmp/dev-container-features/aws-cli_0 \
3897&& chmod +x ./devcontainer-features-install.sh \
3898&& ./devcontainer-features-install.sh
3899
3900COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
3901RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3902&& cd /tmp/dev-container-features/docker-in-docker_1 \
3903&& chmod +x ./devcontainer-features-install.sh \
3904&& ./devcontainer-features-install.sh
3905
3906
3907ARG _DEV_CONTAINERS_IMAGE_USER=root
3908USER $_DEV_CONTAINERS_IMAGE_USER
3909"#
3910        );
3911
3912        let uid_dockerfile = files
3913            .iter()
3914            .find(|f| {
3915                f.file_name()
3916                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3917            })
3918            .expect("to be found");
3919        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3920
3921        assert_eq!(
3922            &uid_dockerfile,
3923            r#"ARG BASE_IMAGE
3924FROM $BASE_IMAGE
3925
3926USER root
3927
3928ARG REMOTE_USER
3929ARG NEW_UID
3930ARG NEW_GID
3931SHELL ["/bin/sh", "-c"]
3932RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3933	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3934	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3935	if [ -z "$OLD_UID" ]; then \
3936		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3937	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3938		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3939	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3940		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3941	else \
3942		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3943			FREE_GID=65532; \
3944			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3945			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3946			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3947		fi; \
3948		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3949		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3950		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3951			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3952		fi; \
3953		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3954	fi;
3955
3956ARG IMAGE_USER
3957USER $IMAGE_USER
3958
3959# Ensure that /etc/profile does not clobber the existing path
3960RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3961
3962
3963ENV DOCKER_BUILDKIT=1
3964"#
3965        );
3966    }
3967
    /// End-to-end test: a devcontainer built from a local Dockerfile with two OCI
    /// features (docker-in-docker, go) and `"updateRemoteUserUID": false`.
    ///
    /// Asserts the exact generated `Dockerfile.extended` (which uses the BuildKit
    /// `RUN --mount=type=bind` feature-install path), the generated golang feature
    /// install wrapper script, that a `docker run` command was issued, and that
    /// every recorded `docker exec` carries the expected `remoteEnv` (with
    /// `${containerEnv:PATH}` resolved against the image's `PATH=/initial/path`).
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture (JSONC: comments and trailing commas allowed).
        // Note "updateRemoteUserUID": false — the test name refers to this flag.
        let given_devcontainer_contents = r#"
            /*---------------------------------------------------------------------------------------------
             *  Copyright (c) Microsoft Corporation. All rights reserved.
             *  Licensed under the MIT License. See License.txt in the project root for license information.
             *--------------------------------------------------------------------------------------------*/
            {
              "name": "cli-${devcontainerId}",
              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                  "VARIANT": "18-bookworm",
                  "FOO": "bar",
                },
              },
              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
              "workspaceFolder": "/workspace2",
              "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
              ],

              "forwardPorts": [
                8082,
                8083,
              ],
              "appPort": "8084",
              "updateRemoteUserUID": false,

              "containerEnv": {
                "VARIABLE_VALUE": "value",
              },

              "initializeCommand": "touch IAM.md",

              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

              "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
              },

              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

              "remoteUser": "node",

              "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
              },

              "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                  "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
              },

              "customizations": {
                "vscode": {
                  "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                  ],
                },
                "zed": {
                  "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                  "repositories": {
                    "devcontainers/features": {
                      "permissions": {
                        "contents": "write",
                        "workflows": "write",
                      },
                    },
                  },
                },
              },
            }
            "#;

        // Default test harness: FakeFs, fake HTTP client (serves OCI feature
        // manifests/blobs), FakeDocker, and a recording command runner.
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by "build.dockerfile" into the fake fs.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions should surface.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The feature build must have written a Dockerfile.extended that wraps the
        // user Dockerfile and appends the feature-install stages. Note there is no
        // updateUID stage here (updateRemoteUserUID is false in the fixture).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // The go feature gets a generated install wrapper; locate it by name and
        // by its parent directory containing "go_".
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // A `docker run` must have been issued through the command runner.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        // Every exec recorded by FakeDocker must carry exactly the remoteEnv:
        // OTHER_ENV verbatim, and PATH = image PATH ("/initial/path", from
        // FakeDocker::inspect) + ":/some/other/path" per ${containerEnv:PATH}.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4247
    /// One call captured by `FakeDocker::run_docker_exec`.
    ///
    /// Underscore-prefixed fields are recorded but not currently asserted on by
    /// any visible test; `env` is compared against the expected remote
    /// environment in the exec assertions.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4255
    /// Test double for `DockerClient` that serves canned inspect/compose data
    /// and records `docker exec` requests instead of running them.
    pub(crate) struct FakeDocker {
        // Every exec request received via `run_docker_exec`, in call order.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports the `podman` CLI and no compose BuildKit.
        podman: bool,
    }
4260
4261    impl FakeDocker {
4262        pub(crate) fn new() -> Self {
4263            Self {
4264                podman: false,
4265                exec_commands_recorded: Mutex::new(Vec::new()),
4266            }
4267        }
4268        #[cfg(not(target_os = "windows"))]
4269        fn set_podman(&mut self, podman: bool) {
4270            self.podman = podman;
4271        }
4272    }
4273
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Returns canned inspect results keyed on well-known image/container
        /// ids used by the tests; anything else yields `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // The typescript-node base image used by the Dockerfile tests.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The rust base image used by the docker-compose/podman test.
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Built images/containers derived from the "cli-…" devcontainer name.
            // Note the non-empty env: tests expect PATH=/initial/path here.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id that `find_process_by_filters` always reports;
            // the only canned result that includes mounts.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Containers/images derived from the "rust_a-…" project name.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }

            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns the canned app+db compose config only for the exact
        /// docker-compose.yml path used by the compose test; any other input
        /// fails with `DockerNotAvailable`.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: "../..".to_string(),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: "postgres-data".to_string(),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// No-op: compose builds always "succeed" in tests.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec request (see `RecordedExecCommand`) and succeeds
        /// without running anything.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Always fails — no test currently exercises restarting a container
        /// through this fake.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Always reports the fixed container id "found_docker_ps", which
        /// `inspect` recognizes with mount data.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        /// BuildKit for compose is supported unless emulating podman.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        /// CLI name the manifest should shell out to: "podman" or "docker".
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
4476
    /// A shell command captured by `TestCommandRunner`: the program name plus
    /// its arguments, both stringified for easy assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
4482
    /// `CommandRunner` test double that records every command instead of
    /// spawning processes; query recordings via `commands_by_program`.
    pub(crate) struct TestCommandRunner {
        // All commands passed to `run_command`, in call order.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4486
4487    impl TestCommandRunner {
4488        fn new() -> Self {
4489            Self {
4490                commands_recorded: Mutex::new(Vec::new()),
4491            }
4492        }
4493
4494        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4495            let record = self.commands_recorded.lock().expect("poisoned");
4496            record
4497                .iter()
4498                .filter(|r| r.program == program)
4499                .map(|r| r.clone())
4500                .collect()
4501        }
4502    }
4503
4504    #[async_trait]
4505    impl CommandRunner for TestCommandRunner {
4506        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4507            let mut record = self.commands_recorded.lock().expect("poisoned");
4508
4509            record.push(TestCommand {
4510                program: command.get_program().display().to_string(),
4511                args: command
4512                    .get_args()
4513                    .map(|a| a.display().to_string())
4514                    .collect(),
4515            });
4516
4517            Ok(Output {
4518                status: ExitStatus::default(),
4519                stdout: vec![],
4520                stderr: vec![],
4521            })
4522        }
4523    }
4524
4525    fn fake_http_client() -> Arc<dyn HttpClient> {
4526        FakeHttpClient::create(|request| async move {
4527            let (parts, _body) = request.into_parts();
4528            if parts.uri.path() == "/token" {
4529                let token_response = TokenResponse {
4530                    token: "token".to_string(),
4531                };
4532                return Ok(http::Response::builder()
4533                    .status(200)
4534                    .body(http_client::AsyncBody::from(
4535                        serde_json_lenient::to_string(&token_response).unwrap(),
4536                    ))
4537                    .unwrap());
4538            }
4539
4540            // OCI specific things
4541            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4542                let response = r#"
4543                    {
4544                        "schemaVersion": 2,
4545                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
4546                        "config": {
4547                            "mediaType": "application/vnd.devcontainers",
4548                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4549                            "size": 2
4550                        },
4551                        "layers": [
4552                            {
4553                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4554                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4555                                "size": 59392,
4556                                "annotations": {
4557                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4558                                }
4559                            }
4560                        ],
4561                        "annotations": {
4562                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4563                            "com.github.package.type": "devcontainer_feature"
4564                        }
4565                    }
4566                    "#;
4567                return Ok(http::Response::builder()
4568                    .status(200)
4569                    .body(http_client::AsyncBody::from(response))
4570                    .unwrap());
4571            }
4572
4573            if parts.uri.path()
4574                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4575            {
4576                let response = build_tarball(vec![
4577                    ("./NOTES.md", r#"
4578                        ## Limitations
4579
4580                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4581                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4582                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4583                          ```
4584                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4585                          ```
4586                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4587
4588
4589                        ## OS Support
4590
4591                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4592
4593                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4594
4595                        `bash` is required to execute the `install.sh` script."#),
4596                    ("./README.md", r#"
4597                        # Docker (Docker-in-Docker) (docker-in-docker)
4598
4599                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4600
4601                        ## Example Usage
4602
4603                        ```json
4604                        "features": {
4605                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4606                        }
4607                        ```
4608
4609                        ## Options
4610
4611                        | Options Id | Description | Type | Default Value |
4612                        |-----|-----|-----|-----|
4613                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4614                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4615                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4616                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4617                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4618                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4619                        | installDockerBuildx | Install Docker Buildx | boolean | true |
4620                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4621                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4622
4623                        ## Customizations
4624
4625                        ### VS Code Extensions
4626
4627                        - `ms-azuretools.vscode-containers`
4628
4629                        ## Limitations
4630
4631                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4632                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4633                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4634                          ```
4635                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4636                          ```
4637                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4638
4639
4640                        ## OS Support
4641
4642                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4643
4644                        `bash` is required to execute the `install.sh` script.
4645
4646
4647                        ---
4648
4649                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
4650                    ("./devcontainer-feature.json", r#"
4651                        {
4652                          "id": "docker-in-docker",
4653                          "version": "2.16.1",
4654                          "name": "Docker (Docker-in-Docker)",
4655                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4656                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4657                          "options": {
4658                            "version": {
4659                              "type": "string",
4660                              "proposals": [
4661                                "latest",
4662                                "none",
4663                                "20.10"
4664                              ],
4665                              "default": "latest",
4666                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4667                            },
4668                            "moby": {
4669                              "type": "boolean",
4670                              "default": true,
4671                              "description": "Install OSS Moby build instead of Docker CE"
4672                            },
4673                            "mobyBuildxVersion": {
4674                              "type": "string",
4675                              "default": "latest",
4676                              "description": "Install a specific version of moby-buildx when using Moby"
4677                            },
4678                            "dockerDashComposeVersion": {
4679                              "type": "string",
4680                              "enum": [
4681                                "none",
4682                                "v1",
4683                                "v2"
4684                              ],
4685                              "default": "v2",
4686                              "description": "Default version of Docker Compose (v1, v2 or none)"
4687                            },
4688                            "azureDnsAutoDetection": {
4689                              "type": "boolean",
4690                              "default": true,
4691                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4692                            },
4693                            "dockerDefaultAddressPool": {
4694                              "type": "string",
4695                              "default": "",
4696                              "proposals": [],
4697                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4698                            },
4699                            "installDockerBuildx": {
4700                              "type": "boolean",
4701                              "default": true,
4702                              "description": "Install Docker Buildx"
4703                            },
4704                            "installDockerComposeSwitch": {
4705                              "type": "boolean",
4706                              "default": false,
4707                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4708                            },
4709                            "disableIp6tables": {
4710                              "type": "boolean",
4711                              "default": false,
4712                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4713                            }
4714                          },
4715                          "entrypoint": "/usr/local/share/docker-init.sh",
4716                          "privileged": true,
4717                          "containerEnv": {
4718                            "DOCKER_BUILDKIT": "1"
4719                          },
4720                          "customizations": {
4721                            "vscode": {
4722                              "extensions": [
4723                                "ms-azuretools.vscode-containers"
4724                              ],
4725                              "settings": {
4726                                "github.copilot.chat.codeGeneration.instructions": [
4727                                  {
4728                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
4729                                  }
4730                                ]
4731                              }
4732                            }
4733                          },
4734                          "mounts": [
4735                            {
4736                              "source": "dind-var-lib-docker-${devcontainerId}",
4737                              "target": "/var/lib/docker",
4738                              "type": "volume"
4739                            }
4740                          ],
4741                          "installsAfter": [
4742                            "ghcr.io/devcontainers/features/common-utils"
4743                          ]
4744                        }"#),
4745                    ("./install.sh", r#"
4746                    #!/usr/bin/env bash
4747                    #-------------------------------------------------------------------------------------------------------------
4748                    # Copyright (c) Microsoft Corporation. All rights reserved.
4749                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
4750                    #-------------------------------------------------------------------------------------------------------------
4751                    #
4752                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
4753                    # Maintainer: The Dev Container spec maintainers
4754
4755
4756                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
4757                    USE_MOBY="${MOBY:-"true"}"
4758                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
4759                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
4760                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
4761                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
4762                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
4763                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
4764                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
4765                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
4766                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
4767                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
4768                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
4769                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
4770
4771                    # Default: Exit on any failure.
4772                    set -e
4773
4774                    # Clean up
4775                    rm -rf /var/lib/apt/lists/*
4776
4777                    # Setup STDERR.
4778                    err() {
4779                        echo "(!) $*" >&2
4780                    }
4781
4782                    if [ "$(id -u)" -ne 0 ]; then
4783                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
4784                        exit 1
4785                    fi
4786
4787                    ###################
4788                    # Helper Functions
4789                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
4790                    ###################
4791
4792                    # Determine the appropriate non-root user
4793                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
4794                        USERNAME=""
4795                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
4796                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
4797                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
4798                                USERNAME=${CURRENT_USER}
4799                                break
4800                            fi
4801                        done
4802                        if [ "${USERNAME}" = "" ]; then
4803                            USERNAME=root
4804                        fi
4805                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
4806                        USERNAME=root
4807                    fi
4808
4809                    # Package manager update function
4810                    pkg_mgr_update() {
4811                        case ${ADJUSTED_ID} in
4812                            debian)
4813                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
4814                                    echo "Running apt-get update..."
4815                                    apt-get update -y
4816                                fi
4817                                ;;
4818                            rhel)
4819                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
4820                                    cache_check_dir="/var/cache/yum"
4821                                else
4822                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
4823                                fi
4824                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
4825                                    echo "Running ${PKG_MGR_CMD} makecache ..."
4826                                    ${PKG_MGR_CMD} makecache
4827                                fi
4828                                ;;
4829                        esac
4830                    }
4831
4832                    # Checks if packages are installed and installs them if not
4833                    check_packages() {
4834                        case ${ADJUSTED_ID} in
4835                            debian)
4836                                if ! dpkg -s "$@" > /dev/null 2>&1; then
4837                                    pkg_mgr_update
4838                                    apt-get -y install --no-install-recommends "$@"
4839                                fi
4840                                ;;
4841                            rhel)
4842                                if ! rpm -q "$@" > /dev/null 2>&1; then
4843                                    pkg_mgr_update
4844                                    ${PKG_MGR_CMD} -y install "$@"
4845                                fi
4846                                ;;
4847                        esac
4848                    }
4849
4850                    # Figure out correct version of a three part version number is not passed
4851                    find_version_from_git_tags() {
4852                        local variable_name=$1
4853                        local requested_version=${!variable_name}
4854                        if [ "${requested_version}" = "none" ]; then return; fi
4855                        local repository=$2
4856                        local prefix=${3:-"tags/v"}
4857                        local separator=${4:-"."}
4858                        local last_part_optional=${5:-"false"}
4859                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
4860                            local escaped_separator=${separator//./\\.}
4861                            local last_part
4862                            if [ "${last_part_optional}" = "true" ]; then
4863                                last_part="(${escaped_separator}[0-9]+)?"
4864                            else
4865                                last_part="${escaped_separator}[0-9]+"
4866                            fi
4867                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
4868                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
4869                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
4870                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
4871                            else
4872                                set +e
4873                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
4874                                set -e
4875                            fi
4876                        fi
4877                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
4878                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
4879                            exit 1
4880                        fi
4881                        echo "${variable_name}=${!variable_name}"
4882                    }
4883
4884                    # Use semver logic to decrement a version number then look for the closest match
4885                    find_prev_version_from_git_tags() {
4886                        local variable_name=$1
4887                        local current_version=${!variable_name}
4888                        local repository=$2
4889                        # Normally a "v" is used before the version number, but support alternate cases
4890                        local prefix=${3:-"tags/v"}
4891                        # Some repositories use "_" instead of "." for version number part separation, support that
4892                        local separator=${4:-"."}
4893                        # Some tools release versions that omit the last digit (e.g. go)
4894                        local last_part_optional=${5:-"false"}
4895                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
4896                        local version_suffix_regex=$6
4897                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
4898                        set +e
4899                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
4900                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
4901                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
4902
4903                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
4904                                ((major=major-1))
4905                                declare -g ${variable_name}="${major}"
4906                                # Look for latest version from previous major release
4907                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4908                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
4909                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
4910                                ((minor=minor-1))
4911                                declare -g ${variable_name}="${major}.${minor}"
4912                                # Look for latest version from previous minor release
4913                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4914                            else
4915                                ((breakfix=breakfix-1))
4916                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
4917                                    declare -g ${variable_name}="${major}.${minor}"
4918                                else
4919                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
4920                                fi
4921                            fi
4922                        set -e
4923                    }
4924
4925                    # Function to fetch the version released prior to the latest version
4926                    get_previous_version() {
4927                        local url=$1
4928                        local repo_url=$2
4929                        local variable_name=$3
4930                        prev_version=${!variable_name}
4931
4932                        output=$(curl -s "$repo_url");
4933                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
4934                          message=$(echo "$output" | jq -r '.message')
4935
4936                          if [[ $message == "API rate limit exceeded"* ]]; then
4937                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
4938                                echo -e "\nAttempting to find latest version using GitHub tags."
4939                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
4940                                declare -g ${variable_name}="${prev_version}"
4941                           fi
4942                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
4943                            echo -e "\nAttempting to find latest version using GitHub Api."
4944                            version=$(echo "$output" | jq -r '.[1].tag_name')
4945                            declare -g ${variable_name}="${version#v}"
4946                        fi
4947                        echo "${variable_name}=${!variable_name}"
4948                    }
4949
4950                    get_github_api_repo_url() {
4951                        local url=$1
4952                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
4953                    }
4954
4955                    ###########################################
4956                    # Start docker-in-docker installation
4957                    ###########################################
4958
4959                    # Ensure apt is in non-interactive to avoid prompts
4960                    export DEBIAN_FRONTEND=noninteractive
4961
4962                    # Source /etc/os-release to get OS info
4963                    . /etc/os-release
4964
4965                    # Determine adjusted ID and package manager
4966                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
4967                        ADJUSTED_ID="debian"
4968                        PKG_MGR_CMD="apt-get"
4969                        # Use dpkg for Debian-based systems
4970                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
4971                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
4972                        ADJUSTED_ID="rhel"
4973                        # Determine the appropriate package manager for RHEL-based systems
4974                        for pkg_mgr in tdnf dnf microdnf yum; do
4975                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
4976                                PKG_MGR_CMD="$pkg_mgr"
4977                                break
4978                            fi
4979                        done
4980
4981                        if [ -z "${PKG_MGR_CMD}" ]; then
4982                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
4983                            exit 1
4984                        fi
4985
4986                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
4987                    else
4988                        err "Linux distro ${ID} not supported."
4989                        exit 1
4990                    fi
4991
4992                    # Azure Linux specific setup
4993                    if [ "${ID}" = "azurelinux" ]; then
4994                        VERSION_CODENAME="azurelinux${VERSION_ID}"
4995                    fi
4996
4997                    # Prevent attempting to install Moby on Debian trixie (packages removed)
4998                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
4999                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5000                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5001                        exit 1
5002                    fi
5003
5004                    # Check if distro is supported
5005                    if [ "${USE_MOBY}" = "true" ]; then
5006                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5007                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5008                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5009                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5010                                exit 1
5011                            fi
5012                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
5013                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5014                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5015                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5016                            else
5017                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5018                            fi
5019                        fi
5020                    else
5021                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5022                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5023                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5024                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5025                                exit 1
5026                            fi
5027                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5028                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5029
5030                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5031                        fi
5032                    fi
5033
5034                    # Install base dependencies
                    # NOTE(review): check_packages / err / ADJUSTED_ID / PKG_MGR_CMD are
                    # presumably defined earlier in this script - confirm against the full file.
5035                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5036                    case ${ADJUSTED_ID} in
5037                        debian)
5038                            check_packages apt-transport-https $base_packages dirmngr
5039                            ;;
5040                        rhel)
5041                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel
5042
5043                            ;;
5044                    esac
5045
5046                    # Install git if not already present
5047                    if ! command -v git >/dev/null 2>&1; then
5048                        check_packages git
5049                    fi
5050
5051                    # Update CA certificates to ensure HTTPS connections work properly
5052                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5053                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5054                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5055                        update-ca-certificates
5056                    fi
5057
5058                    # Swap to legacy iptables for compatibility (Debian only)
                    # 'type' (not 'command -v') is used here; both detect the binary.
5059                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5060                        update-alternatives --set iptables /usr/sbin/iptables-legacy
5061                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5062                    fi
5063
5064                    # Set up the necessary repositories
                    # Moby (open-source engine) comes from Microsoft package repos;
                    # Docker CE comes from download.docker.com.
5065                    if [ "${USE_MOBY}" = "true" ]; then
5066                        # Name of open source engine/cli
5067                        engine_package_name="moby-engine"
5068                        cli_package_name="moby-cli"
5069
5070                        case ${ADJUSTED_ID} in
5071                            debian)
5072                                # Import key safely and import Microsoft apt repo
5073                                {
5074                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5075                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5076                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5077                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5078                                ;;
5079                            rhel)
5080                                echo "(*) ${ID} detected - checking for Moby packages..."
5081
5082                                # Check if moby packages are available in default repos
5083                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5084                                    echo "(*) Using built-in ${ID} Moby packages"
5085                                else
5086                                    case "${ID}" in
5087                                        azurelinux)
5088                                            echo "(*) Moby packages not found in Azure Linux repositories"
5089                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5090                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5091                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5092                                            exit 1
5093                                            ;;
5094                                        mariner)
5095                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
5096                                            # Add Microsoft repository if packages aren't available locally
5097                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5098                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
5099                    [microsoft]
5100                    name=Microsoft Repository
5101                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5102                    enabled=1
5103                    gpgcheck=1
5104                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5105                    EOF
5106                                    # Verify packages are available after adding repo
5107                                    pkg_mgr_update
5108                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5109                                        echo "(*) Moby packages not found in Microsoft repository either"
5110                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5111                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5112                                        exit 1
5113                                    fi
5114                                    ;;
5115                                *)
5116                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5117                                    exit 1
5118                                    ;;
5119                                esac
5120                            fi
5121                            ;;
5122                        esac
5123                    else
5124                        # Name of licensed engine/cli
5125                        engine_package_name="docker-ce"
5126                        cli_package_name="docker-ce-cli"
5127                        case ${ADJUSTED_ID} in
5128                            debian)
5129                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5130                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5131                                ;;
5132                            rhel)
5133                                # Docker CE repository setup for RHEL-based systems
                                # Writes the CentOS 9 stable repo definition; the heredoc body
                                # is intentionally de-indented to match the '<< EOF' delimiter rules.
5134                                setup_docker_ce_repo() {
5135                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5136                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
5137                    [docker-ce-stable]
5138                    name=Docker CE Stable
5139                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5140                    enabled=1
5141                    gpgcheck=1
5142                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5143                    skip_if_unavailable=1
5144                    module_hotfixes=1
5145                    EOF
5146                                }
                                # Best-effort install of extra libraries Docker CE needs on
                                # Azure Linux / Mariner; failures are logged but not fatal.
5147                                install_azure_linux_deps() {
5148                                    echo "(*) Installing device-mapper libraries for Docker CE..."
5149                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5150                                    echo "(*) Installing additional Docker CE dependencies..."
5151                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5152                                        echo "(*) Some optional dependencies could not be installed, continuing..."
5153                                    }
5154                                }
                                # When SELinux is enabled, append a minimal file-context rule for
                                # /var/lib/docker in place of the missing container-selinux policy.
5155                                setup_selinux_context() {
5156                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5157                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
5158                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5159                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5160                                    fi
5161                                }
5162
5163                                # Special handling for RHEL Docker CE installation
5164                                case "${ID}" in
5165                                    azurelinux|mariner)
5166                                        echo "(*) ${ID} detected"
5167                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5168                                        echo "(*) Setting up Docker CE repository..."
5169
5170                                        setup_docker_ce_repo
5171                                        install_azure_linux_deps
5172
                                        # NOTE(review): this branch only runs inside the outer
                                        # 'else' of [ "${USE_MOBY}" = "true" ], so the condition
                                        # below is always true and the 'else' arm is dead code.
5173                                        if [ "${USE_MOBY}" != "true" ]; then
5174                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5175                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5176                                            setup_selinux_context
5177                                        else
5178                                            echo "(*) Using Moby - container-selinux not required"
5179                                        fi
5180                                        ;;
5181                                    *)
5182                                        # Standard RHEL/CentOS/Fedora approach: prefer the native
5183                                        # repo-management tool, fall back to writing the repo file.
5183                                        if command -v dnf >/dev/null 2>&1; then
5184                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5185                                        elif command -v yum-config-manager >/dev/null 2>&1; then
5186                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5187                                        else
5188                                            # Manual fallback
5189                                            setup_docker_ce_repo
5190                                        fi
5191                                        ;;
5192                            esac
5193                            ;;
5194                        esac
5195                    fi
5196
5197                    # Refresh package database
5198                    case ${ADJUSTED_ID} in
5199                        debian)
5200                            apt-get update
5201                            ;;
5202                        rhel)
5203                            pkg_mgr_update
5204                            ;;
5205                    esac
5206
5207                    # Soft version matching
                    # Resolves DOCKER_VERSION to a concrete package version suffix
                    # (engine_version_suffix / cli_version_suffix) for the installer below.
5208                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
5209                        # Empty, meaning grab whatever "latest" is in apt repo
5210                        engine_version_suffix=""
5211                        cli_version_suffix=""
5212                    else
5213                        case ${ADJUSTED_ID} in
5214                            debian)
5215                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
5216                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
5217                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
5218                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
5219                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5220                        set +e # Don't exit if finding version fails - will handle gracefully
5221                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5222                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5223                        set -e
                        # A bare "=" means grep matched nothing - treat as "no match found".
5224                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
5225                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5226                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5227                            exit 1
5228                        fi
5229                        ;;
5230                    rhel)
5231                         # For RHEL-based systems, use dnf/yum to find versions
5232                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
5233                                set +e # Don't exit if finding version fails - will handle gracefully
5234                                    if [ "${USE_MOBY}" = "true" ]; then
5235                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5236                                    else
5237                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5238                                    fi
5239                                set -e
                                # On RHEL the engine and cli suffixes are always set to the
                                # same "-<version>" value (or both left empty).
5240                                if [ -n "${available_versions}" ]; then
5241                                    engine_version_suffix="-${available_versions}"
5242                                    cli_version_suffix="-${available_versions}"
5243                                else
5244                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
5245                                    engine_version_suffix=""
5246                                    cli_version_suffix=""
5247                                fi
5248                                ;;
5249                        esac
5250                    fi
5251
5252                    # Version matching for moby-buildx
                    # Mirrors the docker/moby soft-matching above, but only for the
                    # moby-buildx package (Moby installs only).
5253                    if [ "${USE_MOBY}" = "true" ]; then
5254                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
5255                            # Empty, meaning grab whatever "latest" is in apt repo
5256                            buildx_version_suffix=""
5257                        else
5258                            case ${ADJUSTED_ID} in
5259                                debian)
5260                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5261                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
5262                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5263                            set +e
5264                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
5265                            set -e
5266                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
5267                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5268                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5269                                exit 1
5270                            fi
5271                            ;;
5272                                rhel)
5273                                    # For RHEL-based systems, try to find buildx version or use latest
5274                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5275                                    set +e
5276                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
5277                                    set -e
5278                                    if [ -n "${available_buildx}" ]; then
5279                                        buildx_version_suffix="-${available_buildx}"
5280                                    else
5281                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
5282                                        buildx_version_suffix=""
5283                                    fi
5284                                    ;;
5285                            esac
                            # Debugging aid: show the resolved suffix before installing.
5286                            echo "buildx_version_suffix ${buildx_version_suffix}"
5287                        fi
5288                    fi
5289
5290                    # Install Docker / Moby CLI if not already installed
                    # Skips installation entirely when both 'docker' and 'dockerd' already resolve.
5291                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5292                        echo "Docker / Moby CLI and Engine already installed."
5293                    else
5294                            case ${ADJUSTED_ID} in
5295                            debian)
5296                                if [ "${USE_MOBY}" = "true" ]; then
5297                                    # Install engine
5298                                    set +e # Handle error gracefully
5299                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5300                                        exit_code=$?
5301                                    set -e
5302
5303                                    if [ ${exit_code} -ne 0 ]; then
5304                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5305                                        exit 1
5306                                    fi
5307
5308                                    # Install compose
5309                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5310                                else
                                    # Docker CE: pin the two packages so later 'apt-get upgrade'
                                    # does not move the engine out from under running containers.
5311                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5312                                    # Install compose
5313                                    apt-mark hold docker-ce docker-ce-cli
5314                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5315                                fi
5316                                ;;
5317                            rhel)
5318                                if [ "${USE_MOBY}" = "true" ]; then
5319                                    set +e # Handle error gracefully
5320                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5321                                        exit_code=$?
5322                                    set -e
5323
5324                                    if [ ${exit_code} -ne 0 ]; then
5325                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5326                                        exit 1
5327                                    fi
5328
5329                                    # Install compose
5330                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5331                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5332                                    fi
5333                                else
5334                                                   # Special handling for Azure Linux Docker CE installation
5335                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5336                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5337
5338                                        # Use rpm with --force and --nodeps for Azure Linux
5339                                        set +e  # Don't exit on error for this section
5340                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5341                                        install_result=$?
5342                                        set -e
5343
5344                                        if [ $install_result -ne 0 ]; then
                                            # Announce the fallback once (the message was duplicated).
5345                                            echo "(*) Standard installation failed, trying manual installation..."
5346
5349                                            # Create directory for downloading packages
5350                                            mkdir -p /tmp/docker-ce-install
5351
5352                                            # Download packages manually using curl since tdnf doesn't support download
5353                                            echo "(*) Downloading Docker CE packages manually..."
5354
5355                                            # Get the repository baseurl
                                            # NOTE(review): hard-coded to x86_64/el9 - arm64 Azure Linux
                                            # hosts would need a different baseurl; confirm intended scope.
5356                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5357
5358                                            # Download packages directly
5359                                            cd /tmp/docker-ce-install
5360
5361                                            # Get package names with versions
                                            # FIX: each version is now derived from its matching suffix
                                            # (engine -> docker-ce, cli -> docker-ce-cli); the sources were
                                            # swapped. Also set docker_cli_version in the fallback branch so
                                            # it can never be used while unset.
5362                                            if [ -n "${engine_version_suffix}" ]; then
5363                                                docker_ce_version="${engine_version_suffix#-}"
5364                                                docker_cli_version="${cli_version_suffix#-}"
5365                                            else
5366                                                # Get latest version from repository
5367                                                docker_ce_version="latest"
                                                docker_cli_version="latest"
5368                                            fi
5369
5370                                            echo "(*) Attempting to download Docker CE packages from repository..."
5371
5372                                            # Try to download latest packages if specific version fails
5373                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5374                                                # Fallback: try to get latest available version
                                                # Scrape the directory listing for the first matching rpm name.
5375                                                echo "(*) Specific version not found, trying latest..."
5376                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5377                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5378                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5379
5380                                                if [ -n "${latest_docker}" ]; then
5381                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5382                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5383                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5384                                                else
5385                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
5386                                                    echo "(*) Please check repository configuration or use 'moby': true"
5387                                                    exit 1
5388                                                fi
5389                                            fi
5390                                            # Install systemd libraries required by Docker CE
5391                                            echo "(*) Installing systemd libraries required by Docker CE..."
5392                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5393                                                echo "(*) WARNING: Could not install systemd libraries"
5394                                                echo "(*) Docker may fail to start without these"
5395                                            }
5396
5397                                            # Install with rpm --force --nodeps
                                            # Deliberate dependency bypass: container-selinux is not
                                            # packaged for Azure Linux / Mariner.
5398                                            echo "(*) Installing Docker CE packages with dependency override..."
5399                                            rpm -Uvh --force --nodeps *.rpm
5400
5401                                            # Cleanup
5402                                            cd /
5403                                            rm -rf /tmp/docker-ce-install
5404
5405                                            echo "(*) Docker CE installation completed with dependency bypass"
5406                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5407                                        fi
5408                                    else
5409                                        # Standard installation for other RHEL-based systems
5410                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5411                                    fi
5412                                    # Install compose
5413                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5414                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5415                                    fi
5416                                fi
5417                                ;;
5418                        esac
5419                    fi
5420
5421                    echo "Finished installing docker / moby!"
5422
                    # Docker CLI plugin directory used below when copying docker-compose
                    # so it is also reachable as 'docker compose'.
5423                    docker_home="/usr/libexec/docker"
5424                    cli_plugins_dir="${docker_home}/cli-plugins"
5425
5426                    # fallback for docker-compose
                    # Retries the docker-compose download with the previous release when the
                    # requested version's artifact is missing. Relies on globals set by the
                    # caller (compose_version, target_compose_arch, docker_compose_path) and
                    # on get_previous_version updating 'compose_version' by name - defined
                    # elsewhere in this script; confirm against the full file.
5427                    fallback_compose(){
5428                        local url=$1
5429                        local repo_url=$(get_github_api_repo_url "$url")
5430                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5431                        get_previous_version "${url}" "${repo_url}" compose_version
5432                        echo -e "\nAttempting to install v${compose_version}"
5433                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5434                    }
5435
5436                    # If 'docker-compose' command is to be included
5437                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                        # Map the host architecture onto the compose release asset name.
5438                        case "${architecture}" in
5439                        amd64|x86_64) target_compose_arch=x86_64 ;;
5440                        arm64|aarch64) target_compose_arch=aarch64 ;;
5441                        *)
5442                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5443                            exit 1
5444                        esac
5445
5446                        docker_compose_path="/usr/local/bin/docker-compose"
5447                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
                            # Compose v1 is end-of-life; install is best-effort and warned about.
5448                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5449                            INSTALL_DOCKER_COMPOSE_SWITCH="false"
5450
5451                            if [ "${target_compose_arch}" = "x86_64" ]; then
5452                                echo "(*) Installing docker compose v1..."
5453                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5454                                chmod +x ${docker_compose_path}
5455
5456                                # Download the SHA256 checksum
5457                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5458                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5459                                sha256sum -c docker-compose.sha256sum --ignore-missing
5460                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5461                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5462                                exit 1
5463                            else
5464                                # Use pip to get a version that runs on this architecture
5465                                check_packages python3-minimal python3-pip libffi-dev python3-venv
5466                                echo "(*) Installing docker compose v1 via pip..."
5467                                export PYTHONUSERBASE=/usr/local
5468                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5469                            fi
5470                        else
                            # Compose v2: resolve the requested tag to a real release, then
                            # download the standalone binary.
5471                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5472                            docker_compose_url="https://github.com/docker/compose"
5473                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5474                            echo "(*) Installing docker-compose ${compose_version}..."
5475                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5476                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5477                                     fallback_compose "$docker_compose_url"
5478                            }
5479
5480                            chmod +x ${docker_compose_path}
5481
5482                            # Download the SHA256 checksum
5483                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5484                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5485                            sha256sum -c docker-compose.sha256sum --ignore-missing
5486
5487                            mkdir -p ${cli_plugins_dir}
5488                            cp ${docker_compose_path} ${cli_plugins_dir}
5489                        fi
5490                    fi
5491
5492                    # fallback method for compose-switch
5493                    fallback_compose-switch() {
5494                        local url=$1
5495                        local repo_url=$(get_github_api_repo_url "$url")
5496                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5497                        get_previous_version "$url" "$repo_url" compose_switch_version
5498                        echo -e "\nAttempting to install v${compose_switch_version}"
5499                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5500                    }
5501                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5502                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5503                        if type docker-compose > /dev/null 2>&1; then
5504                            echo "(*) Installing compose-switch..."
5505                            current_compose_path="$(command -v docker-compose)"
5506                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5507                            compose_switch_version="latest"
5508                            compose_switch_url="https://github.com/docker/compose-switch"
5509                            # Try to get latest version, fallback to known stable version if GitHub API fails
5510                            set +e
5511                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
5512                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5513                                echo "(*) GitHub API rate limited or failed, using fallback method"
5514                                fallback_compose-switch "$compose_switch_url"
5515                            fi
5516                            set -e
5517
5518                            # Map architecture for compose-switch downloads
5519                            case "${architecture}" in
5520                                amd64|x86_64) target_switch_arch=amd64 ;;
5521                                arm64|aarch64) target_switch_arch=arm64 ;;
5522                                *) target_switch_arch=${architecture} ;;
5523                            esac
5524                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5525                            chmod +x /usr/local/bin/compose-switch
5526                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5527                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5528                            mv "${current_compose_path}" "${target_compose_path}"
5529                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5530                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5531                        else
5532                            err "Skipping installation of compose-switch as docker compose is unavailable..."
5533                        fi
5534                    fi
5535
5536                    # If init file already exists, exit
5537                    if [ -f "/usr/local/share/docker-init.sh" ]; then
5538                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
5539                        # Clean up
5540                        rm -rf /var/lib/apt/lists/*
5541                        exit 0
5542                    fi
5543                    echo "docker-init doesn't exist, adding..."
5544
5545                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5546                            groupadd -r docker
5547                    fi
5548
5549                    usermod -aG docker ${USERNAME}
5550
5551                    # fallback for docker/buildx
5552                    fallback_buildx() {
5553                        local url=$1
5554                        local repo_url=$(get_github_api_repo_url "$url")
5555                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5556                        get_previous_version "$url" "$repo_url" buildx_version
5557                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5558                        echo -e "\nAttempting to install v${buildx_version}"
5559                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5560                    }
5561
5562                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5563                        buildx_version="latest"
5564                        docker_buildx_url="https://github.com/docker/buildx"
5565                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5566                        echo "(*) Installing buildx ${buildx_version}..."
5567
5568                          # Map architecture for buildx downloads
5569                        case "${architecture}" in
5570                            amd64|x86_64) target_buildx_arch=amd64 ;;
5571                            arm64|aarch64) target_buildx_arch=arm64 ;;
5572                            *) target_buildx_arch=${architecture} ;;
5573                        esac
5574
5575                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5576
5577                        cd /tmp
5578                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5579
5580                        docker_home="/usr/libexec/docker"
5581                        cli_plugins_dir="${docker_home}/cli-plugins"
5582
5583                        mkdir -p ${cli_plugins_dir}
5584                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5585                        chmod +x ${cli_plugins_dir}/docker-buildx
5586
5587                        chown -R "${USERNAME}:docker" "${docker_home}"
5588                        chmod -R g+r+w "${docker_home}"
5589                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5590                    fi
5591
5592                    DOCKER_DEFAULT_IP6_TABLES=""
5593                    if [ "$DISABLE_IP6_TABLES" == true ]; then
5594                        requested_version=""
5595                        # checking whether the version requested either is in semver format or just a number denoting the major version
5596                        # and, extracting the major version number out of the two scenarios
5597                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5598                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5599                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5600                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5601                            requested_version=$DOCKER_VERSION
5602                        fi
5603                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5604                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5605                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5606                        fi
5607                    fi
5608
5609                    if [ ! -d /usr/local/share ]; then
5610                        mkdir -p /usr/local/share
5611                    fi
5612
5613                    tee /usr/local/share/docker-init.sh > /dev/null \
5614                    << EOF
5615                    #!/bin/sh
5616                    #-------------------------------------------------------------------------------------------------------------
5617                    # Copyright (c) Microsoft Corporation. All rights reserved.
5618                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5619                    #-------------------------------------------------------------------------------------------------------------
5620
5621                    set -e
5622
5623                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5624                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5625                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5626                    EOF
5627
5628                    tee -a /usr/local/share/docker-init.sh > /dev/null \
5629                    << 'EOF'
5630                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5631                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5632                        find /run /var/run -iname 'docker*.pid' -delete || :
5633                        find /run /var/run -iname 'container*.pid' -delete || :
5634
5635                        # -- Start: dind wrapper script --
5636                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5637
5638                        export container=docker
5639
5640                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5641                            mount -t securityfs none /sys/kernel/security || {
5642                                echo >&2 'Could not mount /sys/kernel/security.'
5643                                echo >&2 'AppArmor detection and --privileged mode might break.'
5644                            }
5645                        fi
5646
5647                        # Mount /tmp (conditionally)
5648                        if ! mountpoint -q /tmp; then
5649                            mount -t tmpfs none /tmp
5650                        fi
5651
5652                        set_cgroup_nesting()
5653                        {
5654                            # cgroup v2: enable nesting
5655                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5656                                # move the processes from the root group to the /init group,
5657                                # otherwise writing subtree_control fails with EBUSY.
5658                                # An error during moving non-existent process (i.e., "cat") is ignored.
5659                                mkdir -p /sys/fs/cgroup/init
5660                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5661                                # enable controllers
5662                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5663                                    > /sys/fs/cgroup/cgroup.subtree_control
5664                            fi
5665                        }
5666
5667                        # Set cgroup nesting, retrying if necessary
5668                        retry_cgroup_nesting=0
5669
5670                        until [ "${retry_cgroup_nesting}" -eq "5" ];
5671                        do
5672                            set +e
5673                                set_cgroup_nesting
5674
5675                                if [ $? -ne 0 ]; then
5676                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5677                                else
5678                                    break
5679                                fi
5680
5681                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5682                            set -e
5683                        done
5684
5685                        # -- End: dind wrapper script --
5686
5687                        # Handle DNS
5688                        set +e
5689                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5690                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5691                            then
5692                                echo "Setting dockerd Azure DNS."
5693                                CUSTOMDNS="--dns 168.63.129.16"
5694                            else
5695                                echo "Not setting dockerd DNS manually."
5696                                CUSTOMDNS=""
5697                            fi
5698                        set -e
5699
5700                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5701                        then
5702                            DEFAULT_ADDRESS_POOL=""
5703                        else
5704                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5705                        fi
5706
5707                        # Start docker/moby engine
5708                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5709                    INNEREOF
5710                    )"
5711
5712                    sudo_if() {
5713                        COMMAND="$*"
5714
5715                        if [ "$(id -u)" -ne 0 ]; then
5716                            sudo $COMMAND
5717                        else
5718                            $COMMAND
5719                        fi
5720                    }
5721
5722                    retry_docker_start_count=0
5723                    docker_ok="false"
5724
5725                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
5726                    do
5727                        # Start using sudo if not invoked as root
5728                        if [ "$(id -u)" -ne 0 ]; then
5729                            sudo /bin/sh -c "${dockerd_start}"
5730                        else
5731                            eval "${dockerd_start}"
5732                        fi
5733
5734                        retry_count=0
5735                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
5736                        do
5737                            sleep 1s
5738                            set +e
5739                                docker info > /dev/null 2>&1 && docker_ok="true"
5740                            set -e
5741
5742                            retry_count=`expr $retry_count + 1`
5743                        done
5744
5745                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
5746                            echo "(*) Failed to start docker, retrying..."
5747                            set +e
5748                                sudo_if pkill dockerd
5749                                sudo_if pkill containerd
5750                            set -e
5751                        fi
5752
5753                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
5754                    done
5755
5756                    # Execute whatever commands were passed in (if any). This allows us
5757                    # to set this script to ENTRYPOINT while still executing the default CMD.
5758                    exec "$@"
5759                    EOF
5760
5761                    chmod +x /usr/local/share/docker-init.sh
5762                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
5763
5764                    # Clean up
5765                    rm -rf /var/lib/apt/lists/*
5766
5767                    echo 'docker-in-docker-debian script has completed!'"#),
5768                ]).await;
5769
5770                return Ok(http::Response::builder()
5771                    .status(200)
5772                    .body(AsyncBody::from(response))
5773                    .unwrap());
5774            }
            // Mock registry route: OCI manifest for the `go` feature at tag `1`.
            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
                // Static OCI image manifest (schemaVersion 2). Its single layer's
                // digest matches the blob path handled by the next route in this
                // handler, and the `dev.containers.metadata` annotation embeds the
                // feature's devcontainer-feature.json as an escaped JSON string.
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
                                "size": 20992,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }
                    "#;

                // Short-circuit with a 200 carrying the manifest body verbatim.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(http_client::AsyncBody::from(response))
                    .unwrap());
            }
5807            if parts.uri.path()
5808                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
5809            {
5810                let response = build_tarball(vec![
5811                    ("./devcontainer-feature.json", r#"
5812                        {
5813                            "id": "go",
5814                            "version": "1.3.3",
5815                            "name": "Go",
5816                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
5817                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
5818                            "options": {
5819                                "version": {
5820                                    "type": "string",
5821                                    "proposals": [
5822                                        "latest",
5823                                        "none",
5824                                        "1.24",
5825                                        "1.23"
5826                                    ],
5827                                    "default": "latest",
5828                                    "description": "Select or enter a Go version to install"
5829                                },
5830                                "golangciLintVersion": {
5831                                    "type": "string",
5832                                    "default": "latest",
5833                                    "description": "Version of golangci-lint to install"
5834                                }
5835                            },
5836                            "init": true,
5837                            "customizations": {
5838                                "vscode": {
5839                                    "extensions": [
5840                                        "golang.Go"
5841                                    ],
5842                                    "settings": {
5843                                        "github.copilot.chat.codeGeneration.instructions": [
5844                                            {
5845                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
5846                                            }
5847                                        ]
5848                                    }
5849                                }
5850                            },
5851                            "containerEnv": {
5852                                "GOROOT": "/usr/local/go",
5853                                "GOPATH": "/go",
5854                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
5855                            },
5856                            "capAdd": [
5857                                "SYS_PTRACE"
5858                            ],
5859                            "securityOpt": [
5860                                "seccomp=unconfined"
5861                            ],
5862                            "installsAfter": [
5863                                "ghcr.io/devcontainers/features/common-utils"
5864                            ]
5865                        }
5866                        "#),
5867                    ("./install.sh", r#"
5868                    #!/usr/bin/env bash
5869                    #-------------------------------------------------------------------------------------------------------------
5870                    # Copyright (c) Microsoft Corporation. All rights reserved.
5871                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
5872                    #-------------------------------------------------------------------------------------------------------------
5873                    #
5874                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
5875                    # Maintainer: The VS Code and Codespaces Teams
5876
5877                    TARGET_GO_VERSION="${VERSION:-"latest"}"
5878                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
5879
5880                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
5881                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
5882                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5883                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
5884
5885                    # https://www.google.com/linuxrepositories/
5886                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
5887
5888                    set -e
5889
5890                    if [ "$(id -u)" -ne 0 ]; then
5891                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5892                        exit 1
5893                    fi
5894
5895                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
5896                    . /etc/os-release
5897                    # Get an adjusted ID independent of distro variants
5898                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
5899                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5900                        ADJUSTED_ID="debian"
5901                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
5902                        ADJUSTED_ID="rhel"
5903                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
5904                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
5905                        else
5906                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
5907                        fi
5908                    else
5909                        echo "Linux distro ${ID} not supported."
5910                        exit 1
5911                    fi
5912
5913                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
5914                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
5915                        # Update the repo files to reference vault.centos.org.
5916                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
5917                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
5918                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
5919                    fi
5920
5921                    # Setup INSTALL_CMD & PKG_MGR_CMD
5922                    if type apt-get > /dev/null 2>&1; then
5923                        PKG_MGR_CMD=apt-get
5924                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
5925                    elif type microdnf > /dev/null 2>&1; then
5926                        PKG_MGR_CMD=microdnf
5927                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
5928                    elif type dnf > /dev/null 2>&1; then
5929                        PKG_MGR_CMD=dnf
5930                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
5931                    else
5932                        PKG_MGR_CMD=yum
5933                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
5934                    fi
5935
5936                    # Clean up
5937                    clean_up() {
5938                        case ${ADJUSTED_ID} in
5939                            debian)
5940                                rm -rf /var/lib/apt/lists/*
5941                                ;;
5942                            rhel)
5943                                rm -rf /var/cache/dnf/* /var/cache/yum/*
5944                                rm -rf /tmp/yum.log
5945                                rm -rf ${GPG_INSTALL_PATH}
5946                                ;;
5947                        esac
5948                    }
5949                    clean_up
5950
5951
5952                    # Figure out correct version of a three part version number is not passed
5953                    find_version_from_git_tags() {
5954                        local variable_name=$1
5955                        local requested_version=${!variable_name}
5956                        if [ "${requested_version}" = "none" ]; then return; fi
5957                        local repository=$2
5958                        local prefix=${3:-"tags/v"}
5959                        local separator=${4:-"."}
5960                        local last_part_optional=${5:-"false"}
5961                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5962                            local escaped_separator=${separator//./\\.}
5963                            local last_part
5964                            if [ "${last_part_optional}" = "true" ]; then
5965                                last_part="(${escaped_separator}[0-9]+)?"
5966                            else
5967                                last_part="${escaped_separator}[0-9]+"
5968                            fi
5969                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5970                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5971                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5972                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5973                            else
5974                                set +e
5975                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5976                                set -e
5977                            fi
5978                        fi
5979                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5980                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5981                            exit 1
5982                        fi
5983                        echo "${variable_name}=${!variable_name}"
5984                    }
5985
5986                    pkg_mgr_update() {
5987                        case $ADJUSTED_ID in
5988                            debian)
5989                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5990                                    echo "Running apt-get update..."
5991                                    ${PKG_MGR_CMD} update -y
5992                                fi
5993                                ;;
5994                            rhel)
5995                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5996                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
5997                                        echo "Running ${PKG_MGR_CMD} makecache ..."
5998                                        ${PKG_MGR_CMD} makecache
5999                                    fi
6000                                else
6001                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6002                                        echo "Running ${PKG_MGR_CMD} check-update ..."
6003                                        set +e
6004                                        ${PKG_MGR_CMD} check-update
6005                                        rc=$?
6006                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
6007                                            exit 1
6008                                        fi
6009                                        set -e
6010                                    fi
6011                                fi
6012                                ;;
6013                        esac
6014                    }
6015
6016                    # Checks if packages are installed and installs them if not
6017                    check_packages() {
6018                        case ${ADJUSTED_ID} in
6019                            debian)
6020                                if ! dpkg -s "$@" > /dev/null 2>&1; then
6021                                    pkg_mgr_update
6022                                    ${INSTALL_CMD} "$@"
6023                                fi
6024                                ;;
6025                            rhel)
6026                                if ! rpm -q "$@" > /dev/null 2>&1; then
6027                                    pkg_mgr_update
6028                                    ${INSTALL_CMD} "$@"
6029                                fi
6030                                ;;
6031                        esac
6032                    }
6033
6034                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6035                    rm -f /etc/profile.d/00-restore-env.sh
6036                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6037                    chmod +x /etc/profile.d/00-restore-env.sh
6038
6039                    # Some distributions do not install awk by default (e.g. Mariner)
6040                    if ! type awk >/dev/null 2>&1; then
6041                        check_packages awk
6042                    fi
6043
6044                    # Determine the appropriate non-root user
6045                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6046                        USERNAME=""
6047                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6048                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6049                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6050                                USERNAME=${CURRENT_USER}
6051                                break
6052                            fi
6053                        done
6054                        if [ "${USERNAME}" = "" ]; then
6055                            USERNAME=root
6056                        fi
6057                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6058                        USERNAME=root
6059                    fi
6060
6061                    export DEBIAN_FRONTEND=noninteractive
6062
6063                    check_packages ca-certificates gnupg2 tar gcc make pkg-config
6064
6065                    if [ $ADJUSTED_ID = "debian" ]; then
6066                        check_packages g++ libc6-dev
6067                    else
6068                        check_packages gcc-c++ glibc-devel
6069                    fi
6070                    # Install curl, git, other dependencies if missing
6071                    if ! type curl > /dev/null 2>&1; then
6072                        check_packages curl
6073                    fi
6074                    if ! type git > /dev/null 2>&1; then
6075                        check_packages git
6076                    fi
6077                    # Some systems, e.g. Mariner, still a few more packages
6078                    if ! type as > /dev/null 2>&1; then
6079                        check_packages binutils
6080                    fi
6081                    if ! [ -f /usr/include/linux/errno.h ]; then
6082                        check_packages kernel-headers
6083                    fi
6084                    # Minimal RHEL install may need findutils installed
6085                    if ! [ -f /usr/bin/find ]; then
6086                        check_packages findutils
6087                    fi
6088
6089                    # Get closest match for version number specified
6090                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6091
6092                    architecture="$(uname -m)"
6093                    case $architecture in
6094                        x86_64) architecture="amd64";;
6095                        aarch64 | armv8*) architecture="arm64";;
6096                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
6097                        i?86) architecture="386";;
6098                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6099                    esac
6100
6101                    # Install Go
6102                    umask 0002
6103                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6104                        groupadd -r golang
6105                    fi
6106                    usermod -a -G golang "${USERNAME}"
6107                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6108
6109                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6110                        # Use a temporary location for gpg keys to avoid polluting image
6111                        export GNUPGHOME="/tmp/tmp-gnupg"
6112                        mkdir -p ${GNUPGHOME}
6113                        chmod 700 ${GNUPGHOME}
6114                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6115                        gpg -q --import /tmp/tmp-gnupg/golang_key
6116                        echo "Downloading Go ${TARGET_GO_VERSION}..."
6117                        set +e
6118                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6119                        exit_code=$?
6120                        set -e
6121                        if [ "$exit_code" != "0" ]; then
6122                            echo "(!) Download failed."
6123                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6124                            set +e
6125                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6126                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6127                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6128                            # Handle Go's odd version pattern where "0" releases omit the last part
6129                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6130                                ((minor=minor-1))
6131                                TARGET_GO_VERSION="${major}.${minor}"
6132                                # Look for latest version from previous minor release
6133                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6134                            else
6135                                ((breakfix=breakfix-1))
6136                                if [ "${breakfix}" = "0" ]; then
6137                                    TARGET_GO_VERSION="${major}.${minor}"
6138                                else
6139                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6140                                fi
6141                            fi
6142                            set -e
6143                            echo "Trying ${TARGET_GO_VERSION}..."
6144                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6145                        fi
6146                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6147                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6148                        echo "Extracting Go ${TARGET_GO_VERSION}..."
6149                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6150                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6151                    else
6152                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6153                    fi
6154
6155                    # Install Go tools that are isImportant && !replacedByGopls based on
6156                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6157                    GO_TOOLS="\
6158                        golang.org/x/tools/gopls@latest \
6159                        honnef.co/go/tools/cmd/staticcheck@latest \
6160                        golang.org/x/lint/golint@latest \
6161                        github.com/mgechev/revive@latest \
6162                        github.com/go-delve/delve/cmd/dlv@latest \
6163                        github.com/fatih/gomodifytags@latest \
6164                        github.com/haya14busa/goplay/cmd/goplay@latest \
6165                        github.com/cweill/gotests/gotests@latest \
6166                        github.com/josharian/impl@latest"
6167
6168                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6169                        echo "Installing common Go tools..."
6170                        export PATH=${TARGET_GOROOT}/bin:${PATH}
6171                        export GOPATH=/tmp/gotools
6172                        export GOCACHE="${GOPATH}/cache"
6173
6174                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6175                        cd "${GOPATH}"
6176
6177                        # Use go get for versions of go under 1.16
6178                        go_install_command=install
6179                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6180                            export GO111MODULE=on
6181                            go_install_command=get
6182                            echo "Go version < 1.16, using go get."
6183                        fi
6184
6185                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6186
6187                        # Move Go tools into path
6188                        if [ -d "${GOPATH}/bin" ]; then
6189                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6190                        fi
6191
6192                        # Install golangci-lint from precompiled binaries
6193                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6194                            echo "Installing golangci-lint latest..."
6195                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6196                                sh -s -- -b "${TARGET_GOPATH}/bin"
6197                        else
6198                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6199                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6200                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6201                        fi
6202
6203                        # Remove Go tools temp directory
6204                        rm -rf "${GOPATH}"
6205                    fi
6206
6207
6208                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6209                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6210                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6211                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6212
6213                    # Clean up
6214                    clean_up
6215
6216                    echo "Done!"
6217                        "#),
6218                ])
6219                .await;
6220                return Ok(http::Response::builder()
6221                    .status(200)
6222                    .body(AsyncBody::from(response))
6223                    .unwrap());
6224            }
6225            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6226                let response = r#"
6227                    {
6228                        "schemaVersion": 2,
6229                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6230                        "config": {
6231                            "mediaType": "application/vnd.devcontainers",
6232                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6233                            "size": 2
6234                        },
6235                        "layers": [
6236                            {
6237                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6238                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6239                                "size": 19968,
6240                                "annotations": {
6241                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6242                                }
6243                            }
6244                        ],
6245                        "annotations": {
6246                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6247                            "com.github.package.type": "devcontainer_feature"
6248                        }
6249                    }"#;
6250                return Ok(http::Response::builder()
6251                    .status(200)
6252                    .body(AsyncBody::from(response))
6253                    .unwrap());
6254            }
6255            if parts.uri.path()
6256                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6257            {
6258                let response = build_tarball(vec![
6259                    (
6260                        "./devcontainer-feature.json",
6261                        r#"
6262{
6263    "id": "aws-cli",
6264    "version": "1.1.3",
6265    "name": "AWS CLI",
6266    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6267    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6268    "options": {
6269        "version": {
6270            "type": "string",
6271            "proposals": [
6272                "latest"
6273            ],
6274            "default": "latest",
6275            "description": "Select or enter an AWS CLI version."
6276        },
6277        "verbose": {
6278            "type": "boolean",
6279            "default": true,
6280            "description": "Suppress verbose output."
6281        }
6282    },
6283    "customizations": {
6284        "vscode": {
6285            "extensions": [
6286                "AmazonWebServices.aws-toolkit-vscode"
6287            ],
6288            "settings": {
6289                "github.copilot.chat.codeGeneration.instructions": [
6290                    {
6291                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6292                    }
6293                ]
6294            }
6295        }
6296    },
6297    "installsAfter": [
6298        "ghcr.io/devcontainers/features/common-utils"
6299    ]
6300}
6301                    "#,
6302                    ),
6303                    (
6304                        "./install.sh",
6305                        r#"#!/usr/bin/env bash
6306                    #-------------------------------------------------------------------------------------------------------------
6307                    # Copyright (c) Microsoft Corporation. All rights reserved.
6308                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6309                    #-------------------------------------------------------------------------------------------------------------
6310                    #
6311                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6312                    # Maintainer: The VS Code and Codespaces Teams
6313
6314                    set -e
6315
6316                    # Clean up
6317                    rm -rf /var/lib/apt/lists/*
6318
6319                    VERSION=${VERSION:-"latest"}
6320                    VERBOSE=${VERBOSE:-"true"}
6321
6322                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6323                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6324
6325                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6326                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6327                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6328                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6329                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6330                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6331                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6332                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6333                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6334                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6335                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6336                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6337                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6338                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6339                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6340                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6341                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6342                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6343                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6344                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6345                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6346                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6347                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6348                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6349                    YLZATHZKTJyiqA==
6350                    =vYOk
6351                    -----END PGP PUBLIC KEY BLOCK-----"
6352
6353                    if [ "$(id -u)" -ne 0 ]; then
6354                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6355                        exit 1
6356                    fi
6357
6358                    apt_get_update()
6359                    {
6360                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6361                            echo "Running apt-get update..."
6362                            apt-get update -y
6363                        fi
6364                    }
6365
6366                    # Checks if packages are installed and installs them if not
6367                    check_packages() {
6368                        if ! dpkg -s "$@" > /dev/null 2>&1; then
6369                            apt_get_update
6370                            apt-get -y install --no-install-recommends "$@"
6371                        fi
6372                    }
6373
6374                    export DEBIAN_FRONTEND=noninteractive
6375
6376                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6377
6378                    verify_aws_cli_gpg_signature() {
6379                        local filePath=$1
6380                        local sigFilePath=$2
6381                        local awsGpgKeyring=aws-cli-public-key.gpg
6382
6383                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6384                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6385                        local status=$?
6386
6387                        rm "./${awsGpgKeyring}"
6388
6389                        return ${status}
6390                    }
6391
6392                    install() {
6393                        local scriptZipFile=awscli.zip
6394                        local scriptSigFile=awscli.sig
6395
6396                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6397                        if [ "${VERSION}" != "latest" ]; then
6398                            local versionStr=-${VERSION}
6399                        fi
6400                        architecture=$(dpkg --print-architecture)
6401                        case "${architecture}" in
6402                            amd64) architectureStr=x86_64 ;;
6403                            arm64) architectureStr=aarch64 ;;
6404                            *)
6405                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6406                                exit 1
6407                        esac
6408                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6409                        curl "${scriptUrl}" -o "${scriptZipFile}"
6410                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6411
6412                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6413                        if (( $? > 0 )); then
6414                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6415                            exit 1
6416                        fi
6417
6418                        if [ "${VERBOSE}" = "false" ]; then
6419                            unzip -q "${scriptZipFile}"
6420                        else
6421                            unzip "${scriptZipFile}"
6422                        fi
6423
6424                        ./aws/install
6425
6426                        # kubectl bash completion
6427                        mkdir -p /etc/bash_completion.d
6428                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6429
6430                        # kubectl zsh completion
6431                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6432                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6433                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6434                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6435                        fi
6436
6437                        rm -rf ./aws
6438                    }
6439
6440                    echo "(*) Installing AWS CLI..."
6441
6442                    install
6443
6444                    # Clean up
6445                    rm -rf /var/lib/apt/lists/*
6446
6447                    echo "Done!""#,
6448                    ),
6449                    ("./scripts/", r#""#),
6450                    (
6451                        "./scripts/fetch-latest-completer-scripts.sh",
6452                        r#"
6453                        #!/bin/bash
6454                        #-------------------------------------------------------------------------------------------------------------
6455                        # Copyright (c) Microsoft Corporation. All rights reserved.
6456                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6457                        #-------------------------------------------------------------------------------------------------------------
6458                        #
6459                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6460                        # Maintainer: The Dev Container spec maintainers
6461                        #
6462                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6463                        #
6464                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6465                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6466                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6467
6468                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6469                        chmod +x "$BASH_COMPLETER_SCRIPT"
6470
6471                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6472                        chmod +x "$ZSH_COMPLETER_SCRIPT"
6473                        "#,
6474                    ),
6475                    ("./scripts/vendor/", r#""#),
6476                    (
6477                        "./scripts/vendor/aws_bash_completer",
6478                        r#"
6479                        # Typically that would be added under one of the following paths:
6480                        # - /etc/bash_completion.d
6481                        # - /usr/local/etc/bash_completion.d
6482                        # - /usr/share/bash-completion/completions
6483
6484                        complete -C aws_completer aws
6485                        "#,
6486                    ),
6487                    (
6488                        "./scripts/vendor/aws_zsh_completer.sh",
6489                        r#"
6490                        # Source this file to activate auto completion for zsh using the bash
6491                        # compatibility helper.  Make sure to run `compinit` before, which should be
6492                        # given usually.
6493                        #
6494                        # % source /path/to/zsh_complete.sh
6495                        #
6496                        # Typically that would be called somewhere in your .zshrc.
6497                        #
6498                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6499                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6500                        #
6501                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6502                        #
6503                        # zsh releases prior to that version do not export the required env variables!
6504
6505                        autoload -Uz bashcompinit
6506                        bashcompinit -i
6507
6508                        _bash_complete() {
6509                          local ret=1
6510                          local -a suf matches
6511                          local -x COMP_POINT COMP_CWORD
6512                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6513                          local -x COMP_LINE="$words"
6514                          local -A savejobstates savejobtexts
6515
6516                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6517                          (( COMP_CWORD = CURRENT - 1))
6518                          COMP_WORDS=( $words )
6519                          BASH_VERSINFO=( 2 05b 0 1 release )
6520
6521                          savejobstates=( ${(kv)jobstates} )
6522                          savejobtexts=( ${(kv)jobtexts} )
6523
6524                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6525
6526                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6527
6528                          if [[ -n $matches ]]; then
6529                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6530                              compset -P '*/' && matches=( ${matches##*/} )
6531                              compset -S '/*' && matches=( ${matches%%/*} )
6532                              compadd -Q -f "${suf[@]}" -a matches && ret=0
6533                            else
6534                              compadd -Q "${suf[@]}" -a matches && ret=0
6535                            fi
6536                          fi
6537
6538                          if (( ret )); then
6539                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6540                              _default "${suf[@]}" && ret=0
6541                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6542                              _directories "${suf[@]}" && ret=0
6543                            fi
6544                          fi
6545
6546                          return ret
6547                        }
6548
6549                        complete -C aws_completer aws
6550                        "#,
6551                    ),
6552                ]).await;
6553
6554                return Ok(http::Response::builder()
6555                    .status(200)
6556                    .body(AsyncBody::from(response))
6557                    .unwrap());
6558            }
6559
6560            Ok(http::Response::builder()
6561                .status(404)
6562                .body(http_client::AsyncBody::default())
6563                .unwrap())
6564        })
6565    }
6566}