devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use regex::Regex;
  10
  11use fs::Fs;
  12use http_client::HttpClient;
  13use util::{ResultExt, command::Command};
  14
  15use crate::{
  16    DevContainerConfig, DevContainerContext,
  17    command_json::{CommandRunner, DefaultCommandRunner},
  18    devcontainer_api::{DevContainerError, DevContainerUp},
  19    devcontainer_json::{
  20        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
  21        deserialize_devcontainer_json,
  22    },
  23    docker::{
  24        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  25        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  26        get_remote_dir_from_config,
  27    },
  28    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  29    get_oci_token,
  30    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  31    safe_id_lower,
  32};
  33
/// Tracks how far the devcontainer config has been processed.
///
/// `Deserialized` holds the config as parsed straight from JSON;
/// `VariableParsed` holds the same config after `${...}` variable
/// substitution (see `parse_nonremote_vars`). Several operations refuse
/// to run until the config reaches the `VariableParsed` state.
enum ConfigStatus {
    Deserialized(DevContainer),
    VariableParsed(DevContainer),
}
  38
/// The docker-compose files referenced by the devcontainer config together
/// with the merged compose configuration produced for them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Full paths to the compose files (resolved against the config directory).
    files: Vec<PathBuf>,
    // Parsed/merged compose configuration for those files.
    config: DockerComposeConfig,
}
  44
/// Orchestrates turning a devcontainer.json into runnable container
/// resources: parsing, variable expansion, feature download, extended
/// Dockerfile generation, and container startup.
struct DevContainerManifest {
    // Used to fetch OCI tokens, manifests, and feature tarballs.
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // Raw devcontainer.json text, kept so variable expansion can re-parse it.
    raw_config: String,
    // Current parse state of the config (see `ConfigStatus`).
    config: ConfigStatus,
    // Snapshot of the local environment, used for `${localEnv:...}` expansion.
    local_environment: HashMap<String, String>,
    // Root of the local project on disk.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the devcontainer config (e.g. "devcontainer.json").
    file_name: String,
    // Inspect result for the base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Build paths/tags for the features build; populated alongside `root_image`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
/// Default parent directory for mounted projects inside the container
/// (note the trailing slash).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
  61impl DevContainerManifest {
    /// Loads and deserializes the devcontainer config referenced by
    /// `local_config`, returning a manifest in the `Deserialized` state
    /// (variables not yet expanded).
    ///
    /// Errors with `DevContainerParseFailed` if the file can't be read,
    /// parsed, or has an invalid/non-unicode file name, and with
    /// `NotInValidProject` if the config file has no parent directory.
    async fn new(
        context: &DevContainerContext,
        environment: HashMap<String, String>,
        docker_client: Arc<dyn DockerClient>,
        command_runner: Arc<dyn CommandRunner>,
        local_config: DevContainerConfig,
        local_project_path: &Path,
    ) -> Result<Self, DevContainerError> {
        // `config_path` in the local config is relative to the project root.
        let config_path = local_project_path.join(local_config.config_path.clone());
        log::debug!("parsing devcontainer json found in {:?}", &config_path);
        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
            log::error!("Unable to read devcontainer contents: {e}");
            DevContainerError::DevContainerParseFailed
        })?;

        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;

        let devcontainer_directory = config_path.parent().ok_or_else(|| {
            log::error!("Dev container file should be in a directory");
            DevContainerError::NotInValidProject
        })?;
        let file_name = config_path
            .file_name()
            .and_then(|f| f.to_str())
            .ok_or_else(|| {
                log::error!("Dev container file has no file name, or is invalid unicode");
                DevContainerError::DevContainerParseFailed
            })?;

        Ok(Self {
            fs: context.fs.clone(),
            http_client: context.http_client.clone(),
            docker_client,
            command_runner,
            // Keep the raw text so variable expansion can re-parse it later.
            raw_config: devcontainer_contents,
            config: ConfigStatus::Deserialized(devcontainer),
            local_project_directory: local_project_path.to_path_buf(),
            local_environment: environment,
            config_directory: devcontainer_directory.to_path_buf(),
            file_name: file_name.to_string(),
            // Populated later by `download_feature_and_dockerfile_resources`.
            root_image: None,
            features_build_info: None,
            features: Vec::new(),
        })
    }
 107
 108    fn devcontainer_id(&self) -> String {
 109        let mut labels = self.identifying_labels();
 110        labels.sort_by_key(|(key, _)| *key);
 111
 112        let mut hasher = DefaultHasher::new();
 113        for (key, value) in &labels {
 114            key.hash(&mut hasher);
 115            value.hash(&mut hasher);
 116        }
 117
 118        format!("{:016x}", hasher.finish())
 119    }
 120
 121    fn identifying_labels(&self) -> Vec<(&str, String)> {
 122        let labels = vec![
 123            (
 124                "devcontainer.local_folder",
 125                (self.local_project_directory.display()).to_string(),
 126            ),
 127            (
 128                "devcontainer.config_file",
 129                (self.config_file().display()).to_string(),
 130            ),
 131        ];
 132        labels
 133    }
 134
    /// Expands the devcontainer variables that can be resolved without a
    /// running container: `${devcontainerId}`, the workspace folder /
    /// basename variables, and `${localEnv:VAR}` lookups from the captured
    /// local environment. Backslashes in folder paths and env values are
    /// normalized to `/`.
    ///
    /// NOTE(review): `${localEnv:VAR}` references for variables absent from
    /// `local_environment` are left in place rather than replaced with an
    /// empty string — confirm this matches the behavior callers expect.
    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
        let mut replaced_content = content
            .replace("${devcontainerId}", &self.devcontainer_id())
            .replace(
                "${containerWorkspaceFolderBasename}",
                // Empty string when the remote workspace folder is unknown.
                &self.remote_workspace_base_name().unwrap_or_default(),
            )
            .replace(
                "${localWorkspaceFolderBasename}",
                &self.local_workspace_base_name()?,
            )
            .replace(
                "${containerWorkspaceFolder}",
                &self
                    .remote_workspace_folder()
                    .map(|path| path.display().to_string())
                    .unwrap_or_default()
                    .replace('\\', "/"),
            )
            .replace(
                "${localWorkspaceFolder}",
                &self.local_workspace_folder().replace('\\', "/"),
            );
        // Substitute each known local env var; unknown ones are untouched.
        for (k, v) in &self.local_environment {
            let find = format!("${{localEnv:{k}}}");
            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
        }

        Ok(replaced_content)
    }
 165
 166    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 167        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 168        let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
 169
 170        self.config = ConfigStatus::VariableParsed(parsed_config);
 171
 172        Ok(())
 173    }
 174
    /// Merges the running container's environment with the config's
    /// `remoteEnv`, resolving `${containerEnv:VAR}` references against the
    /// live container environment. Config-provided values win on conflict.
    ///
    /// The substitution is done textually on the JSON serialization of
    /// `remote_env` and then deserialized back, so it applies uniformly to
    /// keys and values without walking the map by hand.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Replace ${containerEnv:K} occurrences in the serialized JSON.
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // Config-provided entries override the container's own values.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
 207
 208    fn config_file(&self) -> PathBuf {
 209        self.config_directory.join(&self.file_name)
 210    }
 211
 212    fn dev_container(&self) -> &DevContainer {
 213        match &self.config {
 214            ConfigStatus::Deserialized(dev_container) => dev_container,
 215            ConfigStatus::VariableParsed(dev_container) => dev_container,
 216        }
 217    }
 218
    /// Resolves the on-disk path of the Dockerfile to build, if any:
    /// `None` for image-based configs, the config-relative Dockerfile for
    /// Dockerfile builds, and the primary compose service's Dockerfile for
    /// compose builds (or `None` when it can't be determined).
    async fn dockerfile_location(&self) -> Option<PathBuf> {
        let dev_container = self.dev_container();
        match dev_container.build_type() {
            // A prebuilt image has no Dockerfile.
            DevContainerBuildType::Image(_) => None,
            DevContainerBuildType::Dockerfile(build) => {
                // Dockerfile path is relative to the config directory.
                Some(self.config_directory.join(&build.dockerfile))
            }
            DevContainerBuildType::DockerCompose => {
                // Errors here degrade to "no Dockerfile" rather than failing.
                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
                    return None;
                };
                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
                else {
                    return None;
                };
                main_service
                    .build
                    .and_then(|b| b.dockerfile)
                    .map(|dockerfile| self.config_directory.join(dockerfile))
            }
            DevContainerBuildType::None => None,
        }
    }
 242
 243    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 244        let mut hasher = DefaultHasher::new();
 245        let prefix = match &self.dev_container().name {
 246            Some(name) => &safe_id_lower(name),
 247            None => "zed-dc",
 248        };
 249        let prefix = prefix.get(..6).unwrap_or(prefix);
 250
 251        dockerfile_build_path.hash(&mut hasher);
 252
 253        let hash = hasher.finish();
 254        format!("{}-{:x}-features", prefix, hash)
 255    }
 256
 257    /// Gets the base image from the devcontainer with the following precedence:
 258    /// - The devcontainer image if an image is specified
 259    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 260    /// - The image sourced in the docker-compose main service, if one is specified
 261    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 262    /// If no such image is available, return an error
 263    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 264        match self.dev_container().build_type() {
 265            DevContainerBuildType::Image(image) => {
 266                return Ok(image);
 267            }
 268            DevContainerBuildType::Dockerfile(build) => {
 269                let dockerfile_contents = self.expanded_dockerfile_content().await?;
 270                return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
 271                    || {
 272                        log::error!("Unable to find base image in Dockerfile");
 273                        DevContainerError::DevContainerParseFailed
 274                    },
 275                );
 276            }
 277            DevContainerBuildType::DockerCompose => {
 278                let docker_compose_manifest = self.docker_compose_manifest().await?;
 279                let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 280
 281                if let Some(_) = main_service
 282                    .build
 283                    .as_ref()
 284                    .and_then(|b| b.dockerfile.as_ref())
 285                {
 286                    let dockerfile_contents = self.expanded_dockerfile_content().await?;
 287                    return image_from_dockerfile(
 288                        dockerfile_contents,
 289                        &main_service.build.as_ref().and_then(|b| b.target.clone()),
 290                    )
 291                    .ok_or_else(|| {
 292                        log::error!("Unable to find base image in Dockerfile");
 293                        DevContainerError::DevContainerParseFailed
 294                    });
 295                }
 296                if let Some(image) = &main_service.image {
 297                    return Ok(image.to_string());
 298                }
 299
 300                log::error!("No valid base image found in docker-compose configuration");
 301                return Err(DevContainerError::DevContainerParseFailed);
 302            }
 303            DevContainerBuildType::None => {
 304                log::error!("Not a valid devcontainer config for build");
 305                return Err(DevContainerError::NotInValidProject);
 306            }
 307        }
 308    }
 309
    /// Downloads OCI feature content and prepares the extended Dockerfile
    /// that layers those features on top of the base image.
    ///
    /// Requires the config to be in the `VariableParsed` state. On success,
    /// populates `self.root_image`, `self.features_build_info`, and
    /// `self.features`.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image up front; feature env generation
        // below needs its user information.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Scratch space for the generated build context. The timestamp keeps
        // concurrent/successive builds from colliding.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // Tag is derived from the Dockerfile path so it is stable per build dir.
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // Borrow an empty map when no features are configured (relies on
        // temporary lifetime extension through the match arm).
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Feature install scripts read these via devcontainer-features.builtin.env.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Install order honors overrideFeatureInstallOrder when present.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        // Download each feature's OCI tarball and prepare its install wrapper.
        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature mapped to `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Directory name is "<id>_<index>" so duplicate ids stay distinct.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is used; devcontainer features publish a
            // single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain `${...}` variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the per-feature env file, then wrap the feature's install
            // script so it sources that env.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit is assumed available for plain docker builds; compose
        // support is queried from the client.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Base Dockerfile content is optional; a load failure is logged and
        // treated as "no base Dockerfile".
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the (possibly targeted) base stage so the extended Dockerfile
        // can reference it by a known name.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 567
    /// Assembles the extended Dockerfile text: the (aliased) base Dockerfile,
    /// a stage that normalizes the feature content permissions, the target
    /// stage that copies feature content in and records user home dirs, and
    /// one layer per downloaded feature.
    ///
    /// When `use_buildkit` is false, an extra `FROM ... as
    /// dev_containers_feature_content_source` stage is emitted and the
    /// builtin env file is sourced from a temp path instead of the build
    /// context.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is a Unix-only concern; never attempt it on Windows.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's /etc/passwd entry; the home
        // dir (field 6) is extracted in the RUN below.
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): this concatenation assumes `extended_dockerfile`
            // already ends with a newline (i.e. each feature env layer is
            // newline-terminated) — otherwise the first ENV line would be
            // appended onto the previous line. Confirm against
            // `generate_dockerfile_env`'s output.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 659
 660    fn build_merged_resources(
 661        &self,
 662        base_image: DockerInspect,
 663    ) -> Result<DockerBuildResources, DevContainerError> {
 664        let dev_container = match &self.config {
 665            ConfigStatus::Deserialized(_) => {
 666                log::error!(
 667                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 668                );
 669                return Err(DevContainerError::DevContainerParseFailed);
 670            }
 671            ConfigStatus::VariableParsed(dev_container) => dev_container,
 672        };
 673        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 674
 675        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 676
 677        mounts.append(&mut feature_mounts);
 678
 679        let privileged = dev_container.privileged.unwrap_or(false)
 680            || self.features.iter().any(|f| f.privileged());
 681
 682        let mut entrypoint_script_lines = vec![
 683            "echo Container started".to_string(),
 684            "trap \"exit 0\" 15".to_string(),
 685        ];
 686
 687        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 688            entrypoint_script_lines.push(entrypoint.clone());
 689        }
 690        entrypoint_script_lines.append(&mut vec![
 691            "exec \"$@\"".to_string(),
 692            "while sleep 1 & wait $!; do :; done".to_string(),
 693        ]);
 694
 695        Ok(DockerBuildResources {
 696            image: base_image,
 697            additional_mounts: mounts,
 698            privileged,
 699            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 700        })
 701    }
 702
 703    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 704        if let ConfigStatus::Deserialized(_) = &self.config {
 705            log::error!(
 706                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 707            );
 708            return Err(DevContainerError::DevContainerParseFailed);
 709        }
 710        let dev_container = self.dev_container();
 711        match dev_container.build_type() {
 712            DevContainerBuildType::Image(base_image) => {
 713                let built_docker_image = self.build_docker_image().await?;
 714
 715                let built_docker_image = self
 716                    .update_remote_user_uid(built_docker_image, &base_image)
 717                    .await?;
 718
 719                let resources = self.build_merged_resources(built_docker_image)?;
 720                Ok(DevContainerBuildResources::Docker(resources))
 721            }
 722            DevContainerBuildType::Dockerfile(_) => {
 723                let built_docker_image = self.build_docker_image().await?;
 724                let Some(features_build_info) = &self.features_build_info else {
 725                    log::error!(
 726                        "Can't attempt to build update UID dockerfile before initial docker build"
 727                    );
 728                    return Err(DevContainerError::DevContainerParseFailed);
 729                };
 730                let built_docker_image = self
 731                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 732                    .await?;
 733
 734                let resources = self.build_merged_resources(built_docker_image)?;
 735                Ok(DevContainerBuildResources::Docker(resources))
 736            }
 737            DevContainerBuildType::DockerCompose => {
 738                log::debug!("Using docker compose. Building extended compose files");
 739                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 740
 741                return Ok(DevContainerBuildResources::DockerCompose(
 742                    docker_compose_resources,
 743                ));
 744            }
 745            DevContainerBuildType::None => {
 746                return Err(DevContainerError::DevContainerParseFailed);
 747            }
 748        }
 749    }
 750
 751    async fn run_dev_container(
 752        &self,
 753        build_resources: DevContainerBuildResources,
 754    ) -> Result<DevContainerUp, DevContainerError> {
 755        let ConfigStatus::VariableParsed(_) = &self.config else {
 756            log::error!(
 757                "Variables have not been parsed; cannot proceed with running the dev container"
 758            );
 759            return Err(DevContainerError::DevContainerParseFailed);
 760        };
 761        let running_container = match build_resources {
 762            DevContainerBuildResources::DockerCompose(resources) => {
 763                self.run_docker_compose(resources).await?
 764            }
 765            DevContainerBuildResources::Docker(resources) => {
 766                self.run_docker_image(resources).await?
 767            }
 768        };
 769
 770        let remote_user = get_remote_user_from_config(&running_container, self)?;
 771        let remote_workspace_folder = get_remote_dir_from_config(
 772            &running_container,
 773            (&self.local_project_directory.display()).to_string(),
 774        )?;
 775
 776        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 777
 778        Ok(DevContainerUp {
 779            container_id: running_container.id,
 780            remote_user,
 781            remote_workspace_folder,
 782            extension_ids: self.extension_ids(),
 783            remote_env,
 784        })
 785    }
 786
 787    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 788        let dev_container = match &self.config {
 789            ConfigStatus::Deserialized(_) => {
 790                log::error!(
 791                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 792                );
 793                return Err(DevContainerError::DevContainerParseFailed);
 794            }
 795            ConfigStatus::VariableParsed(dev_container) => dev_container,
 796        };
 797        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 798            return Err(DevContainerError::DevContainerParseFailed);
 799        };
 800        let docker_compose_full_paths = docker_compose_files
 801            .iter()
 802            .map(|relative| self.config_directory.join(relative))
 803            .collect::<Vec<PathBuf>>();
 804
 805        let Some(config) = self
 806            .docker_client
 807            .get_docker_compose_config(&docker_compose_full_paths)
 808            .await?
 809        else {
 810            log::error!("Output could not deserialize into DockerComposeConfig");
 811            return Err(DevContainerError::DevContainerParseFailed);
 812        };
 813        Ok(DockerComposeResources {
 814            files: docker_compose_full_paths,
 815            config,
 816        })
 817    }
 818
 819    async fn build_and_extend_compose_files(
 820        &self,
 821    ) -> Result<DockerComposeResources, DevContainerError> {
 822        let dev_container = match &self.config {
 823            ConfigStatus::Deserialized(_) => {
 824                log::error!(
 825                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
 826                );
 827                return Err(DevContainerError::DevContainerParseFailed);
 828            }
 829            ConfigStatus::VariableParsed(dev_container) => dev_container,
 830        };
 831
 832        let Some(features_build_info) = &self.features_build_info else {
 833            log::error!(
 834                "Cannot build and extend compose files: features build info is not yet constructed"
 835            );
 836            return Err(DevContainerError::DevContainerParseFailed);
 837        };
 838        let mut docker_compose_resources = self.docker_compose_manifest().await?;
 839        let supports_buildkit = self.docker_client.supports_compose_buildkit();
 840
 841        let (main_service_name, main_service) =
 842            find_primary_service(&docker_compose_resources, self)?;
 843        let (built_service_image, built_service_image_tag) = if main_service
 844            .build
 845            .as_ref()
 846            .map(|b| b.dockerfile.as_ref())
 847            .is_some()
 848        {
 849            if !supports_buildkit {
 850                self.build_feature_content_image().await?;
 851            }
 852
 853            let dockerfile_path = &features_build_info.dockerfile_path;
 854
 855            let build_args = if !supports_buildkit {
 856                HashMap::from([
 857                    (
 858                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 859                        "dev_container_auto_added_stage_label".to_string(),
 860                    ),
 861                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 862                ])
 863            } else {
 864                HashMap::from([
 865                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 866                    (
 867                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 868                        "dev_container_auto_added_stage_label".to_string(),
 869                    ),
 870                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 871                ])
 872            };
 873
 874            let additional_contexts = if !supports_buildkit {
 875                None
 876            } else {
 877                Some(HashMap::from([(
 878                    "dev_containers_feature_content_source".to_string(),
 879                    features_build_info
 880                        .features_content_dir
 881                        .display()
 882                        .to_string(),
 883                )]))
 884            };
 885
 886            let build_override = DockerComposeConfig {
 887                name: None,
 888                services: HashMap::from([(
 889                    main_service_name.clone(),
 890                    DockerComposeService {
 891                        image: Some(features_build_info.image_tag.clone()),
 892                        entrypoint: None,
 893                        cap_add: None,
 894                        security_opt: None,
 895                        labels: None,
 896                        build: Some(DockerComposeServiceBuild {
 897                            context: Some(
 898                                main_service
 899                                    .build
 900                                    .as_ref()
 901                                    .and_then(|b| b.context.clone())
 902                                    .unwrap_or_else(|| {
 903                                        features_build_info.empty_context_dir.display().to_string()
 904                                    }),
 905                            ),
 906                            dockerfile: Some(dockerfile_path.display().to_string()),
 907                            target: Some("dev_containers_target_stage".to_string()),
 908                            args: Some(build_args),
 909                            additional_contexts,
 910                        }),
 911                        volumes: Vec::new(),
 912                        ..Default::default()
 913                    },
 914                )]),
 915                volumes: HashMap::new(),
 916            };
 917
 918            let temp_base = std::env::temp_dir().join("devcontainer-zed");
 919            let config_location = temp_base.join("docker_compose_build.json");
 920
 921            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 922                log::error!("Error serializing docker compose runtime override: {e}");
 923                DevContainerError::DevContainerParseFailed
 924            })?;
 925
 926            self.fs
 927                .write(&config_location, config_json.as_bytes())
 928                .await
 929                .map_err(|e| {
 930                    log::error!("Error writing the runtime override file: {e}");
 931                    DevContainerError::FilesystemError
 932                })?;
 933
 934            docker_compose_resources.files.push(config_location);
 935
 936            self.docker_client
 937                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
 938                .await?;
 939            (
 940                self.docker_client
 941                    .inspect(&features_build_info.image_tag)
 942                    .await?,
 943                &features_build_info.image_tag,
 944            )
 945        } else if let Some(image) = &main_service.image {
 946            if dev_container
 947                .features
 948                .as_ref()
 949                .is_none_or(|features| features.is_empty())
 950            {
 951                (self.docker_client.inspect(image).await?, image)
 952            } else {
 953                if !supports_buildkit {
 954                    self.build_feature_content_image().await?;
 955                }
 956
 957                let dockerfile_path = &features_build_info.dockerfile_path;
 958
 959                let build_args = if !supports_buildkit {
 960                    HashMap::from([
 961                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 962                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 963                    ])
 964                } else {
 965                    HashMap::from([
 966                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 967                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 968                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 969                    ])
 970                };
 971
 972                let additional_contexts = if !supports_buildkit {
 973                    None
 974                } else {
 975                    Some(HashMap::from([(
 976                        "dev_containers_feature_content_source".to_string(),
 977                        features_build_info
 978                            .features_content_dir
 979                            .display()
 980                            .to_string(),
 981                    )]))
 982                };
 983
 984                let build_override = DockerComposeConfig {
 985                    name: None,
 986                    services: HashMap::from([(
 987                        main_service_name.clone(),
 988                        DockerComposeService {
 989                            image: Some(features_build_info.image_tag.clone()),
 990                            entrypoint: None,
 991                            cap_add: None,
 992                            security_opt: None,
 993                            labels: None,
 994                            build: Some(DockerComposeServiceBuild {
 995                                context: Some(
 996                                    features_build_info.empty_context_dir.display().to_string(),
 997                                ),
 998                                dockerfile: Some(dockerfile_path.display().to_string()),
 999                                target: Some("dev_containers_target_stage".to_string()),
1000                                args: Some(build_args),
1001                                additional_contexts,
1002                            }),
1003                            volumes: Vec::new(),
1004                            ..Default::default()
1005                        },
1006                    )]),
1007                    volumes: HashMap::new(),
1008                };
1009
1010                let temp_base = std::env::temp_dir().join("devcontainer-zed");
1011                let config_location = temp_base.join("docker_compose_build.json");
1012
1013                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1014                    log::error!("Error serializing docker compose runtime override: {e}");
1015                    DevContainerError::DevContainerParseFailed
1016                })?;
1017
1018                self.fs
1019                    .write(&config_location, config_json.as_bytes())
1020                    .await
1021                    .map_err(|e| {
1022                        log::error!("Error writing the runtime override file: {e}");
1023                        DevContainerError::FilesystemError
1024                    })?;
1025
1026                docker_compose_resources.files.push(config_location);
1027
1028                self.docker_client
1029                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1030                    .await?;
1031
1032                (
1033                    self.docker_client
1034                        .inspect(&features_build_info.image_tag)
1035                        .await?,
1036                    &features_build_info.image_tag,
1037                )
1038            }
1039        } else {
1040            log::error!("Docker compose must have either image or dockerfile defined");
1041            return Err(DevContainerError::DevContainerParseFailed);
1042        };
1043
1044        let built_service_image = self
1045            .update_remote_user_uid(built_service_image, built_service_image_tag)
1046            .await?;
1047
1048        let resources = self.build_merged_resources(built_service_image)?;
1049
1050        let network_mode = main_service.network_mode.as_ref();
1051        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1052        let runtime_override_file = self
1053            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1054            .await?;
1055
1056        docker_compose_resources.files.push(runtime_override_file);
1057
1058        Ok(docker_compose_resources)
1059    }
1060
1061    async fn write_runtime_override_file(
1062        &self,
1063        main_service_name: &str,
1064        network_mode_service: Option<&str>,
1065        resources: DockerBuildResources,
1066    ) -> Result<PathBuf, DevContainerError> {
1067        let config =
1068            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1069        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1070        let config_location = temp_base.join("docker_compose_runtime.json");
1071
1072        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1073            log::error!("Error serializing docker compose runtime override: {e}");
1074            DevContainerError::DevContainerParseFailed
1075        })?;
1076
1077        self.fs
1078            .write(&config_location, config_json.as_bytes())
1079            .await
1080            .map_err(|e| {
1081                log::error!("Error writing the runtime override file: {e}");
1082                DevContainerError::FilesystemError
1083            })?;
1084
1085        Ok(config_location)
1086    }
1087
1088    fn build_runtime_override(
1089        &self,
1090        main_service_name: &str,
1091        network_mode_service: Option<&str>,
1092        resources: DockerBuildResources,
1093    ) -> Result<DockerComposeConfig, DevContainerError> {
1094        let mut runtime_labels = HashMap::new();
1095
1096        if let Some(metadata) = &resources.image.config.labels.metadata {
1097            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1098                log::error!("Error serializing docker image metadata: {e}");
1099                DevContainerError::ContainerNotValid(resources.image.id.clone())
1100            })?;
1101
1102            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1103        }
1104
1105        for (k, v) in self.identifying_labels() {
1106            runtime_labels.insert(k.to_string(), v.to_string());
1107        }
1108
1109        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1110            .additional_mounts
1111            .iter()
1112            .filter_map(|mount| {
1113                if let Some(mount_type) = &mount.mount_type
1114                    && mount_type.to_lowercase() == "volume"
1115                    && let Some(source) = &mount.source
1116                {
1117                    Some((
1118                        source.clone(),
1119                        DockerComposeVolume {
1120                            name: source.clone(),
1121                        },
1122                    ))
1123                } else {
1124                    None
1125                }
1126            })
1127            .collect();
1128
1129        let volumes: Vec<MountDefinition> = resources
1130            .additional_mounts
1131            .iter()
1132            .map(|v| MountDefinition {
1133                source: v.source.clone(),
1134                target: v.target.clone(),
1135                mount_type: v.mount_type.clone(),
1136            })
1137            .collect();
1138
1139        let mut main_service = DockerComposeService {
1140            entrypoint: Some(vec![
1141                "/bin/sh".to_string(),
1142                "-c".to_string(),
1143                resources.entrypoint_script,
1144                "-".to_string(),
1145            ]),
1146            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1147            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1148            labels: Some(runtime_labels),
1149            volumes,
1150            privileged: Some(resources.privileged),
1151            ..Default::default()
1152        };
1153        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1154        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1155        if let Some(forward_ports) = &self.dev_container().forward_ports {
1156            let main_service_ports: Vec<String> = forward_ports
1157                .iter()
1158                .filter_map(|f| match f {
1159                    ForwardPort::Number(port) => Some(port.to_string()),
1160                    ForwardPort::String(port) => {
1161                        let parts: Vec<&str> = port.split(":").collect();
1162                        if parts.len() <= 1 {
1163                            Some(port.to_string())
1164                        } else if parts.len() == 2 {
1165                            if parts[0] == main_service_name {
1166                                Some(parts[1].to_string())
1167                            } else {
1168                                None
1169                            }
1170                        } else {
1171                            None
1172                        }
1173                    }
1174                })
1175                .collect();
1176            for port in main_service_ports {
1177                // If the main service uses a different service's network bridge, append to that service's ports instead
1178                if let Some(network_service_name) = network_mode_service {
1179                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1180                        service.ports.push(DockerComposeServicePort {
1181                            target: port.clone(),
1182                            published: port.clone(),
1183                            ..Default::default()
1184                        });
1185                    } else {
1186                        service_declarations.insert(
1187                            network_service_name.to_string(),
1188                            DockerComposeService {
1189                                ports: vec![DockerComposeServicePort {
1190                                    target: port.clone(),
1191                                    published: port.clone(),
1192                                    ..Default::default()
1193                                }],
1194                                ..Default::default()
1195                            },
1196                        );
1197                    }
1198                } else {
1199                    main_service.ports.push(DockerComposeServicePort {
1200                        target: port.clone(),
1201                        published: port.clone(),
1202                        ..Default::default()
1203                    });
1204                }
1205            }
1206            let other_service_ports: Vec<(&str, &str)> = forward_ports
1207                .iter()
1208                .filter_map(|f| match f {
1209                    ForwardPort::Number(_) => None,
1210                    ForwardPort::String(port) => {
1211                        let parts: Vec<&str> = port.split(":").collect();
1212                        if parts.len() != 2 {
1213                            None
1214                        } else {
1215                            if parts[0] == main_service_name {
1216                                None
1217                            } else {
1218                                Some((parts[0], parts[1]))
1219                            }
1220                        }
1221                    }
1222                })
1223                .collect();
1224            for (service_name, port) in other_service_ports {
1225                if let Some(service) = service_declarations.get_mut(service_name) {
1226                    service.ports.push(DockerComposeServicePort {
1227                        target: port.to_string(),
1228                        published: port.to_string(),
1229                        ..Default::default()
1230                    });
1231                } else {
1232                    service_declarations.insert(
1233                        service_name.to_string(),
1234                        DockerComposeService {
1235                            ports: vec![DockerComposeServicePort {
1236                                target: port.to_string(),
1237                                published: port.to_string(),
1238                                ..Default::default()
1239                            }],
1240                            ..Default::default()
1241                        },
1242                    );
1243                }
1244            }
1245        }
1246
1247        service_declarations.insert(main_service_name.to_string(), main_service);
1248        let new_docker_compose_config = DockerComposeConfig {
1249            name: None,
1250            services: service_declarations,
1251            volumes: config_volumes,
1252        };
1253
1254        Ok(new_docker_compose_config)
1255    }
1256
1257    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1258        let dev_container = match &self.config {
1259            ConfigStatus::Deserialized(_) => {
1260                log::error!(
1261                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1262                );
1263                return Err(DevContainerError::DevContainerParseFailed);
1264            }
1265            ConfigStatus::VariableParsed(dev_container) => dev_container,
1266        };
1267
1268        match dev_container.build_type() {
1269            DevContainerBuildType::Image(image_tag) => {
1270                let base_image = self.docker_client.inspect(&image_tag).await?;
1271                if dev_container
1272                    .features
1273                    .as_ref()
1274                    .is_none_or(|features| features.is_empty())
1275                {
1276                    log::debug!("No features to add. Using base image");
1277                    return Ok(base_image);
1278                }
1279            }
1280            DevContainerBuildType::Dockerfile(_) => {}
1281            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1282                return Err(DevContainerError::DevContainerParseFailed);
1283            }
1284        };
1285
1286        let mut command = self.create_docker_build()?;
1287
1288        let output = self
1289            .command_runner
1290            .run_command(&mut command)
1291            .await
1292            .map_err(|e| {
1293                log::error!("Error building docker image: {e}");
1294                DevContainerError::CommandFailed(command.get_program().display().to_string())
1295            })?;
1296
1297        if !output.status.success() {
1298            let stderr = String::from_utf8_lossy(&output.stderr);
1299            log::error!("docker buildx build failed: {stderr}");
1300            return Err(DevContainerError::CommandFailed(
1301                command.get_program().display().to_string(),
1302            ));
1303        }
1304
1305        // After a successful build, inspect the newly tagged image to get its metadata
1306        let Some(features_build_info) = &self.features_build_info else {
1307            log::error!("Features build info expected, but not created");
1308            return Err(DevContainerError::DevContainerParseFailed);
1309        };
1310        let image = self
1311            .docker_client
1312            .inspect(&features_build_info.image_tag)
1313            .await?;
1314
1315        Ok(image)
1316    }
1317
    /// Windows variant: a no-op. There is no host POSIX UID/GID to align the
    /// container user with, so the inspected image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1326    #[cfg(not(target_os = "windows"))]
1327    async fn update_remote_user_uid(
1328        &self,
1329        image: DockerInspect,
1330        base_image: &str,
1331    ) -> Result<DockerInspect, DevContainerError> {
1332        let dev_container = self.dev_container();
1333
1334        let Some(features_build_info) = &self.features_build_info else {
1335            return Ok(image);
1336        };
1337
1338        // updateRemoteUserUID defaults to true per the devcontainers spec
1339        if dev_container.update_remote_user_uid == Some(false) {
1340            return Ok(image);
1341        }
1342
1343        let remote_user = get_remote_user_from_config(&image, self)?;
1344        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1345            return Ok(image);
1346        }
1347
1348        let image_user = image
1349            .config
1350            .image_user
1351            .as_deref()
1352            .unwrap_or("root")
1353            .to_string();
1354
1355        let host_uid = Command::new("id")
1356            .arg("-u")
1357            .output()
1358            .await
1359            .map_err(|e| {
1360                log::error!("Failed to get host UID: {e}");
1361                DevContainerError::CommandFailed("id -u".to_string())
1362            })
1363            .and_then(|output| {
1364                String::from_utf8_lossy(&output.stdout)
1365                    .trim()
1366                    .parse::<u32>()
1367                    .map_err(|e| {
1368                        log::error!("Failed to parse host UID: {e}");
1369                        DevContainerError::CommandFailed("id -u".to_string())
1370                    })
1371            })?;
1372
1373        let host_gid = Command::new("id")
1374            .arg("-g")
1375            .output()
1376            .await
1377            .map_err(|e| {
1378                log::error!("Failed to get host GID: {e}");
1379                DevContainerError::CommandFailed("id -g".to_string())
1380            })
1381            .and_then(|output| {
1382                String::from_utf8_lossy(&output.stdout)
1383                    .trim()
1384                    .parse::<u32>()
1385                    .map_err(|e| {
1386                        log::error!("Failed to parse host GID: {e}");
1387                        DevContainerError::CommandFailed("id -g".to_string())
1388                    })
1389            })?;
1390
1391        let dockerfile_content = self.generate_update_uid_dockerfile();
1392
1393        let dockerfile_path = features_build_info
1394            .features_content_dir
1395            .join("updateUID.Dockerfile");
1396        self.fs
1397            .write(&dockerfile_path, dockerfile_content.as_bytes())
1398            .await
1399            .map_err(|e| {
1400                log::error!("Failed to write updateUID Dockerfile: {e}");
1401                DevContainerError::FilesystemError
1402            })?;
1403
1404        let updated_image_tag = features_build_info.image_tag.clone();
1405
1406        let mut command = Command::new(self.docker_client.docker_cli());
1407        command.args(["build"]);
1408        command.args(["-f", &dockerfile_path.display().to_string()]);
1409        command.args(["-t", &updated_image_tag]);
1410        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1411        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1412        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1413        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1414        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1415        command.arg(features_build_info.empty_context_dir.display().to_string());
1416
1417        let output = self
1418            .command_runner
1419            .run_command(&mut command)
1420            .await
1421            .map_err(|e| {
1422                log::error!("Error building UID update image: {e}");
1423                DevContainerError::CommandFailed(command.get_program().display().to_string())
1424            })?;
1425
1426        if !output.status.success() {
1427            let stderr = String::from_utf8_lossy(&output.stderr);
1428            log::error!("UID update build failed: {stderr}");
1429            return Err(DevContainerError::CommandFailed(
1430                command.get_program().display().to_string(),
1431            ));
1432        }
1433
1434        self.docker_client.inspect(&updated_image_tag).await
1435    }
1436
1437    #[cfg(not(target_os = "windows"))]
    /// Builds the Dockerfile used to remap the container user's UID/GID to the
    /// requested `NEW_UID`/`NEW_GID`, so bind-mounted files keep consistent
    /// ownership between host and container.
    ///
    /// The embedded shell script parses `/etc/passwd` and `/etc/group`,
    /// no-ops when the IDs already match or the target UID is taken by another
    /// user, reassigns a conflicting group to a free GID (searching downward
    /// from 65532), rewrites the user's passwd/group entries, and `chown`s the
    /// user's home folder. Feature env layers and `containerEnv` entries are
    /// appended after the base script.
    fn generate_update_uid_dockerfile(&self) -> String {
        let mut dockerfile = r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#.to_string();
        // Append each feature's containerEnv layer after the base script.
        for feature in &self.features {
            let container_env_layer = feature.generate_dockerfile_env();
            dockerfile = format!("{dockerfile}\n{container_env_layer}");
        }

        // NOTE(review): the first ENV line below is appended without a leading
        // newline, so this relies on `generate_dockerfile_env` output ending in
        // a trailing newline — confirm against its implementation.
        if let Some(env) = &self.dev_container().container_env {
            for (key, value) in env {
                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
            }
        }
        dockerfile
    }
1490
1491    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1492        let Some(features_build_info) = &self.features_build_info else {
1493            log::error!("Features build info not available for building feature content image");
1494            return Err(DevContainerError::DevContainerParseFailed);
1495        };
1496        let features_content_dir = &features_build_info.features_content_dir;
1497
1498        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1499        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1500
1501        self.fs
1502            .write(&dockerfile_path, dockerfile_content.as_bytes())
1503            .await
1504            .map_err(|e| {
1505                log::error!("Failed to write feature content Dockerfile: {e}");
1506                DevContainerError::FilesystemError
1507            })?;
1508
1509        let mut command = Command::new(self.docker_client.docker_cli());
1510        command.args([
1511            "build",
1512            "-t",
1513            "dev_container_feature_content_temp",
1514            "-f",
1515            &dockerfile_path.display().to_string(),
1516            &features_content_dir.display().to_string(),
1517        ]);
1518
1519        let output = self
1520            .command_runner
1521            .run_command(&mut command)
1522            .await
1523            .map_err(|e| {
1524                log::error!("Error building feature content image: {e}");
1525                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1526            })?;
1527
1528        if !output.status.success() {
1529            let stderr = String::from_utf8_lossy(&output.stderr);
1530            log::error!("Feature content image build failed: {stderr}");
1531            return Err(DevContainerError::CommandFailed(
1532                self.docker_client.docker_cli(),
1533            ));
1534        }
1535
1536        Ok(())
1537    }
1538
1539    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1540        let dev_container = match &self.config {
1541            ConfigStatus::Deserialized(_) => {
1542                log::error!(
1543                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1544                );
1545                return Err(DevContainerError::DevContainerParseFailed);
1546            }
1547            ConfigStatus::VariableParsed(dev_container) => dev_container,
1548        };
1549
1550        let Some(features_build_info) = &self.features_build_info else {
1551            log::error!(
1552                "Cannot create docker build command; features build info has not been constructed"
1553            );
1554            return Err(DevContainerError::DevContainerParseFailed);
1555        };
1556        let mut command = Command::new(self.docker_client.docker_cli());
1557
1558        command.args(["buildx", "build"]);
1559
1560        // --load is short for --output=docker, loading the built image into the local docker images
1561        command.arg("--load");
1562
1563        // BuildKit build context: provides the features content directory as a named context
1564        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1565        command.args([
1566            "--build-context",
1567            &format!(
1568                "dev_containers_feature_content_source={}",
1569                features_build_info.features_content_dir.display()
1570            ),
1571        ]);
1572
1573        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1574        if let Some(build_image) = &features_build_info.build_image {
1575            command.args([
1576                "--build-arg",
1577                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1578            ]);
1579        } else {
1580            command.args([
1581                "--build-arg",
1582                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1583            ]);
1584        }
1585
1586        command.args([
1587            "--build-arg",
1588            &format!(
1589                "_DEV_CONTAINERS_IMAGE_USER={}",
1590                self.root_image
1591                    .as_ref()
1592                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1593                    .unwrap_or(&"root".to_string())
1594            ),
1595        ]);
1596
1597        command.args([
1598            "--build-arg",
1599            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1600        ]);
1601
1602        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1603            for (key, value) in args {
1604                command.args(["--build-arg", &format!("{}={}", key, value)]);
1605            }
1606        }
1607
1608        command.args(["--target", "dev_containers_target_stage"]);
1609
1610        command.args([
1611            "-f",
1612            &features_build_info.dockerfile_path.display().to_string(),
1613        ]);
1614
1615        command.args(["-t", &features_build_info.image_tag]);
1616
1617        if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
1618            command.arg(self.config_directory.display().to_string());
1619        } else {
1620            // Use an empty folder as the build context to avoid pulling in unneeded files.
1621            // The actual feature content is supplied via the BuildKit build context above.
1622            command.arg(features_build_info.empty_context_dir.display().to_string());
1623        }
1624
1625        Ok(command)
1626    }
1627
1628    async fn run_docker_compose(
1629        &self,
1630        resources: DockerComposeResources,
1631    ) -> Result<DockerInspect, DevContainerError> {
1632        let mut command = Command::new(self.docker_client.docker_cli());
1633        command.args(&["compose", "--project-name", &self.project_name()]);
1634        for docker_compose_file in resources.files {
1635            command.args(&["-f", &docker_compose_file.display().to_string()]);
1636        }
1637        command.args(&["up", "-d"]);
1638
1639        let output = self
1640            .command_runner
1641            .run_command(&mut command)
1642            .await
1643            .map_err(|e| {
1644                log::error!("Error running docker compose up: {e}");
1645                DevContainerError::CommandFailed(command.get_program().display().to_string())
1646            })?;
1647
1648        if !output.status.success() {
1649            let stderr = String::from_utf8_lossy(&output.stderr);
1650            log::error!("Non-success status from docker compose up: {}", stderr);
1651            return Err(DevContainerError::CommandFailed(
1652                command.get_program().display().to_string(),
1653            ));
1654        }
1655
1656        if let Some(docker_ps) = self.check_for_existing_container().await? {
1657            log::debug!("Found newly created dev container");
1658            return self.docker_client.inspect(&docker_ps.id).await;
1659        }
1660
1661        log::error!("Could not find existing container after docker compose up");
1662
1663        Err(DevContainerError::DevContainerParseFailed)
1664    }
1665
1666    async fn run_docker_image(
1667        &self,
1668        build_resources: DockerBuildResources,
1669    ) -> Result<DockerInspect, DevContainerError> {
1670        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1671
1672        let output = self
1673            .command_runner
1674            .run_command(&mut docker_run_command)
1675            .await
1676            .map_err(|e| {
1677                log::error!("Error running docker run: {e}");
1678                DevContainerError::CommandFailed(
1679                    docker_run_command.get_program().display().to_string(),
1680                )
1681            })?;
1682
1683        if !output.status.success() {
1684            let std_err = String::from_utf8_lossy(&output.stderr);
1685            log::error!("Non-success status from docker run. StdErr: {std_err}");
1686            return Err(DevContainerError::CommandFailed(
1687                docker_run_command.get_program().display().to_string(),
1688            ));
1689        }
1690
1691        log::debug!("Checking for container that was started");
1692        let Some(docker_ps) = self.check_for_existing_container().await? else {
1693            log::error!("Could not locate container just created");
1694            return Err(DevContainerError::DevContainerParseFailed);
1695        };
1696        self.docker_client.inspect(&docker_ps.id).await
1697    }
1698
1699    fn local_workspace_folder(&self) -> String {
1700        self.local_project_directory.display().to_string()
1701    }
1702    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1703        self.local_project_directory
1704            .file_name()
1705            .map(|f| f.display().to_string())
1706            .ok_or(DevContainerError::DevContainerParseFailed)
1707    }
1708
1709    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1710        self.dev_container()
1711            .workspace_folder
1712            .as_ref()
1713            .map(|folder| PathBuf::from(folder))
1714            .or(Some(
1715                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1716            ))
1717            .ok_or(DevContainerError::DevContainerParseFailed)
1718    }
1719    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1720        self.remote_workspace_folder().and_then(|f| {
1721            f.file_name()
1722                .map(|file_name| file_name.display().to_string())
1723                .ok_or(DevContainerError::DevContainerParseFailed)
1724        })
1725    }
1726
1727    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1728        if let Some(mount) = &self.dev_container().workspace_mount {
1729            return Ok(mount.clone());
1730        }
1731        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1732            return Err(DevContainerError::DevContainerParseFailed);
1733        };
1734
1735        Ok(MountDefinition {
1736            source: Some(self.local_workspace_folder()),
1737            target: format!("/workspaces/{}", project_directory_name.display()),
1738            mount_type: None,
1739        })
1740    }
1741
1742    fn create_docker_run_command(
1743        &self,
1744        build_resources: DockerBuildResources,
1745    ) -> Result<Command, DevContainerError> {
1746        let remote_workspace_mount = self.remote_workspace_mount()?;
1747
1748        let docker_cli = self.docker_client.docker_cli();
1749        let mut command = Command::new(&docker_cli);
1750
1751        command.arg("run");
1752
1753        if build_resources.privileged {
1754            command.arg("--privileged");
1755        }
1756
1757        if &docker_cli == "podman" {
1758            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
1759        }
1760
1761        command.arg("--sig-proxy=false");
1762        command.arg("-d");
1763        command.arg("--mount");
1764        command.arg(remote_workspace_mount.to_string());
1765
1766        for mount in &build_resources.additional_mounts {
1767            command.arg("--mount");
1768            command.arg(mount.to_string());
1769        }
1770
1771        for (key, val) in self.identifying_labels() {
1772            command.arg("-l");
1773            command.arg(format!("{}={}", key, val));
1774        }
1775
1776        if let Some(metadata) = &build_resources.image.config.labels.metadata {
1777            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1778                log::error!("Problem serializing image metadata: {e}");
1779                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
1780            })?;
1781            command.arg("-l");
1782            command.arg(format!(
1783                "{}={}",
1784                "devcontainer.metadata", serialized_metadata
1785            ));
1786        }
1787
1788        if let Some(forward_ports) = &self.dev_container().forward_ports {
1789            for port in forward_ports {
1790                if let ForwardPort::Number(port_number) = port {
1791                    command.arg("-p");
1792                    command.arg(format!("{port_number}:{port_number}"));
1793                }
1794            }
1795        }
1796        for app_port in &self.dev_container().app_port {
1797            command.arg("-p");
1798            command.arg(app_port);
1799        }
1800
1801        command.arg("--entrypoint");
1802        command.arg("/bin/sh");
1803        command.arg(&build_resources.image.id);
1804        command.arg("-c");
1805
1806        command.arg(build_resources.entrypoint_script);
1807        command.arg("-");
1808
1809        Ok(command)
1810    }
1811
1812    fn extension_ids(&self) -> Vec<String> {
1813        self.dev_container()
1814            .customizations
1815            .as_ref()
1816            .map(|c| c.zed.extensions.clone())
1817            .unwrap_or_default()
1818    }
1819
1820    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1821        self.run_initialize_commands().await?;
1822
1823        self.download_feature_and_dockerfile_resources().await?;
1824
1825        let build_resources = self.build_resources().await?;
1826
1827        let devcontainer_up = self.run_dev_container(build_resources).await?;
1828
1829        self.run_remote_scripts(&devcontainer_up, true).await?;
1830
1831        Ok(devcontainer_up)
1832    }
1833
1834    async fn run_remote_scripts(
1835        &self,
1836        devcontainer_up: &DevContainerUp,
1837        new_container: bool,
1838    ) -> Result<(), DevContainerError> {
1839        let ConfigStatus::VariableParsed(config) = &self.config else {
1840            log::error!("Config not yet parsed, cannot proceed with remote scripts");
1841            return Err(DevContainerError::DevContainerScriptsFailed);
1842        };
1843        let remote_folder = self.remote_workspace_folder()?.display().to_string();
1844
1845        if new_container {
1846            if let Some(on_create_command) = &config.on_create_command {
1847                for (command_name, command) in on_create_command.script_commands() {
1848                    log::debug!("Running on create command {command_name}");
1849                    self.docker_client
1850                        .run_docker_exec(
1851                            &devcontainer_up.container_id,
1852                            &remote_folder,
1853                            "root",
1854                            &devcontainer_up.remote_env,
1855                            command,
1856                        )
1857                        .await?;
1858                }
1859            }
1860            if let Some(update_content_command) = &config.update_content_command {
1861                for (command_name, command) in update_content_command.script_commands() {
1862                    log::debug!("Running update content command {command_name}");
1863                    self.docker_client
1864                        .run_docker_exec(
1865                            &devcontainer_up.container_id,
1866                            &remote_folder,
1867                            "root",
1868                            &devcontainer_up.remote_env,
1869                            command,
1870                        )
1871                        .await?;
1872                }
1873            }
1874
1875            if let Some(post_create_command) = &config.post_create_command {
1876                for (command_name, command) in post_create_command.script_commands() {
1877                    log::debug!("Running post create command {command_name}");
1878                    self.docker_client
1879                        .run_docker_exec(
1880                            &devcontainer_up.container_id,
1881                            &remote_folder,
1882                            &devcontainer_up.remote_user,
1883                            &devcontainer_up.remote_env,
1884                            command,
1885                        )
1886                        .await?;
1887                }
1888            }
1889            if let Some(post_start_command) = &config.post_start_command {
1890                for (command_name, command) in post_start_command.script_commands() {
1891                    log::debug!("Running post start command {command_name}");
1892                    self.docker_client
1893                        .run_docker_exec(
1894                            &devcontainer_up.container_id,
1895                            &remote_folder,
1896                            &devcontainer_up.remote_user,
1897                            &devcontainer_up.remote_env,
1898                            command,
1899                        )
1900                        .await?;
1901                }
1902            }
1903        }
1904        if let Some(post_attach_command) = &config.post_attach_command {
1905            for (command_name, command) in post_attach_command.script_commands() {
1906                log::debug!("Running post attach command {command_name}");
1907                self.docker_client
1908                    .run_docker_exec(
1909                        &devcontainer_up.container_id,
1910                        &remote_folder,
1911                        &devcontainer_up.remote_user,
1912                        &devcontainer_up.remote_env,
1913                        command,
1914                    )
1915                    .await?;
1916            }
1917        }
1918
1919        Ok(())
1920    }
1921
1922    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1923        let ConfigStatus::VariableParsed(config) = &self.config else {
1924            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1925            return Err(DevContainerError::DevContainerParseFailed);
1926        };
1927
1928        if let Some(initialize_command) = &config.initialize_command {
1929            log::debug!("Running initialize command");
1930            initialize_command
1931                .run(&self.command_runner, &self.local_project_directory)
1932                .await
1933        } else {
1934            log::warn!("No initialize command found");
1935            Ok(())
1936        }
1937    }
1938
1939    async fn check_for_existing_devcontainer(
1940        &self,
1941    ) -> Result<Option<DevContainerUp>, DevContainerError> {
1942        if let Some(docker_ps) = self.check_for_existing_container().await? {
1943            log::debug!("Dev container already found. Proceeding with it");
1944
1945            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1946
1947            if !docker_inspect.is_running() {
1948                log::debug!("Container not running. Will attempt to start, and then proceed");
1949                self.docker_client.start_container(&docker_ps.id).await?;
1950            }
1951
1952            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1953
1954            let remote_folder = get_remote_dir_from_config(
1955                &docker_inspect,
1956                (&self.local_project_directory.display()).to_string(),
1957            )?;
1958
1959            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1960
1961            let dev_container_up = DevContainerUp {
1962                container_id: docker_ps.id,
1963                remote_user: remote_user,
1964                remote_workspace_folder: remote_folder,
1965                extension_ids: self.extension_ids(),
1966                remote_env,
1967            };
1968
1969            self.run_remote_scripts(&dev_container_up, false).await?;
1970
1971            Ok(Some(dev_container_up))
1972        } else {
1973            log::debug!("Existing container not found.");
1974
1975            Ok(None)
1976        }
1977    }
1978
1979    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1980        self.docker_client
1981            .find_process_by_filters(
1982                self.identifying_labels()
1983                    .iter()
1984                    .map(|(k, v)| format!("label={k}={v}"))
1985                    .collect(),
1986            )
1987            .await
1988    }
1989
1990    fn project_name(&self) -> String {
1991        if let Some(name) = &self.dev_container().name {
1992            safe_id_lower(name)
1993        } else {
1994            let alternate_name = &self
1995                .local_workspace_base_name()
1996                .unwrap_or(self.local_workspace_folder());
1997            safe_id_lower(alternate_name)
1998        }
1999    }
2000
    /// Loads the configured Dockerfile and textually expands `${VAR}`
    /// references using, in precedence order, the devcontainer `build.args`
    /// and any defaults gathered from `ARG key=value` lines earlier in the
    /// file. Only the `${key}` form is substituted (not bare `$key`).
    ///
    /// # Errors
    /// Fails when the config is image-based (no Dockerfile) or when the
    /// Dockerfile cannot be loaded from disk.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Build args supplied by the devcontainer config (may be empty).
        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // Defaults harvested from `ARG key=value` directives, in order seen;
        // they apply to lines AFTER the declaring line.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches a `key=` token at line start or after whitespace.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            // Harvest defaults declared by this line's ARG directive(s).
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // The value spans from just after `key=` up to the start of
                    // the next `key=` match (or the end of the line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2059}
2060
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
///
/// Stored on the manifest as `features_build_info` and consumed by
/// `create_docker_build` and `build_feature_content_image`.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// when `None`, the build falls back to the auto-added stage label.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2079
2080pub(crate) async fn read_devcontainer_configuration(
2081    config: DevContainerConfig,
2082    context: &DevContainerContext,
2083    environment: HashMap<String, String>,
2084) -> Result<DevContainer, DevContainerError> {
2085    let docker = if context.use_podman {
2086        Docker::new("podman")
2087    } else {
2088        Docker::new("docker")
2089    };
2090    let mut dev_container = DevContainerManifest::new(
2091        context,
2092        environment,
2093        Arc::new(docker),
2094        Arc::new(DefaultCommandRunner::new()),
2095        config,
2096        &context.project_directory.as_ref(),
2097    )
2098    .await?;
2099    dev_container.parse_nonremote_vars()?;
2100    Ok(dev_container.dev_container().clone())
2101}
2102
2103pub(crate) async fn spawn_dev_container(
2104    context: &DevContainerContext,
2105    environment: HashMap<String, String>,
2106    config: DevContainerConfig,
2107    local_project_path: &Path,
2108) -> Result<DevContainerUp, DevContainerError> {
2109    let docker = if context.use_podman {
2110        Docker::new("podman")
2111    } else {
2112        Docker::new("docker")
2113    };
2114    let mut devcontainer_manifest = DevContainerManifest::new(
2115        context,
2116        environment,
2117        Arc::new(docker),
2118        Arc::new(DefaultCommandRunner::new()),
2119        config,
2120        local_project_path,
2121    )
2122    .await?;
2123
2124    devcontainer_manifest.parse_nonremote_vars()?;
2125
2126    log::debug!("Checking for existing container");
2127    if let Some(devcontainer) = devcontainer_manifest
2128        .check_for_existing_devcontainer()
2129        .await?
2130    {
2131        Ok(devcontainer)
2132    } else {
2133        log::debug!("Existing container not found. Building");
2134
2135        devcontainer_manifest.build_and_run().await
2136    }
2137}
2138
/// Everything `create_docker_run_command` needs to start the built image.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image to run (its id and config/labels are read).
    image: DockerInspect,
    /// Extra `--mount` entries applied in addition to the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    /// Whether to pass `--privileged` to `docker run`.
    privileged: bool,
    /// Script handed to `/bin/sh -c` as the container entrypoint.
    entrypoint_script: String,
}
2146
/// The two ways a dev container can be brought up: via docker compose, or a
/// direct `docker run` of a (possibly feature-extended) image.
#[derive(Debug)]
enum DevContainerBuildResources {
    /// Compose-based config: compose files plus the parsed compose config.
    DockerCompose(DockerComposeResources),
    /// Single-image config: resources for a direct `docker run`.
    Docker(DockerBuildResources),
}
2152
2153fn find_primary_service(
2154    docker_compose: &DockerComposeResources,
2155    devcontainer: &DevContainerManifest,
2156) -> Result<(String, DockerComposeService), DevContainerError> {
2157    let Some(service_name) = &devcontainer.dev_container().service else {
2158        return Err(DevContainerError::DevContainerParseFailed);
2159    };
2160
2161    match docker_compose.config.services.get(service_name) {
2162        Some(service) => Ok((service_name.clone(), service.clone())),
2163        None => Err(DevContainerError::DevContainerParseFailed),
2164    }
2165}
2166
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): not referenced within this chunk — presumably used by the
/// extended-Dockerfile generation; verify at the use site.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2170
/// Escapes regex special characters in a string by prefixing each with `\`.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, c| {
            if ".*+?^${}()|[]\\".contains(c) {
                escaped.push('\\');
            }
            escaped.push(c);
            escaped
        })
}
2182
/// Extracts the short feature ID from a full feature reference string.
///
/// Strips a trailing `@digest`, or a `:version` when the colon appears after
/// the last slash, then takes the final path segment.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    let base = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            // Treat the colon as a version separator only when it follows
            // the last slash (so registry ports are left alone).
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    base.rsplit('/').next().unwrap_or(base)
}
2206
2207/// Generates a shell command that looks up a user's passwd entry.
2208///
2209/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2210/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2211fn get_ent_passwd_shell_command(user: &str) -> String {
2212    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2213    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2214    format!(
2215        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2216        shell = escaped_for_shell,
2217        re = escaped_for_regex,
2218    )
2219}
2220
2221/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2222///
2223/// Features listed in the override come first (in the specified order), followed
2224/// by any remaining features sorted lexicographically by their full reference ID.
2225fn resolve_feature_order<'a>(
2226    features: &'a HashMap<String, FeatureOptions>,
2227    override_order: &Option<Vec<String>>,
2228) -> Vec<(&'a String, &'a FeatureOptions)> {
2229    if let Some(order) = override_order {
2230        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2231        for ordered_id in order {
2232            if let Some((key, options)) = features.get_key_value(ordered_id) {
2233                ordered.push((key, options));
2234            }
2235        }
2236        let mut remaining: Vec<_> = features
2237            .iter()
2238            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2239            .collect();
2240        remaining.sort_by_key(|(id, _)| id.as_str());
2241        ordered.extend(remaining);
2242        ordered
2243    } else {
2244        let mut entries: Vec<_> = features.iter().collect();
2245        entries.sort_by_key(|(id, _)| id.as_str());
2246        entries
2247    }
2248}
2249
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// The wrapper prints a banner identifying the feature and its options,
/// sources the built-in and per-feature env files under `set -a` so their
/// variables are exported, then executes the feature's `install.sh`. An EXIT
/// trap reports a failure message when the script exits non-zero.
///
/// # Errors
/// Returns `DevContainerParseFailed` when any interpolated value cannot be
/// shell-quoted.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line so the banner's `echo` renders a
    // readable, aligned option list.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!("    {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : {escaped_name}'
echo 'Id            : {escaped_id}'
echo 'Options       :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2308
2309fn dockerfile_inject_alias(
2310    dockerfile_content: &str,
2311    alias: &str,
2312    build_target: Option<String>,
2313) -> String {
2314    match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2315        Some(target) => format!(
2316            r#"{dockerfile_content}
2317FROM {target} AS {alias}"#
2318        ),
2319        None => dockerfile_content.to_string(),
2320    }
2321}
2322
/// Extracts the base image of the effective build stage from a Dockerfile.
///
/// When `target` is `Some`, returns the image of the last
/// `FROM <image> AS <target>` line (stage names compared case-insensitively);
/// otherwise returns the image of the last `FROM` line. Returns `None` when
/// no matching `FROM` line exists or the line carries no image token.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter_map(|line| {
            // Tokenize on any whitespace so repeated spaces or tabs don't
            // yield empty tokens (the previous `split(' ')` returned
            // `Some("")` for `FROM  image`).
            let tokens: Vec<&str> = line.split_whitespace().collect();
            match tokens.first() {
                // Match the FROM instruction itself (case-insensitive, as
                // Dockerfiles allow), not prefixes such as `FROMX`.
                Some(first) if first.eq_ignore_ascii_case("FROM") => Some(tokens),
                _ => None,
            }
        })
        .filter(|tokens| match target {
            Some(target) => {
                // A named stage looks like `FROM [flags] <image> AS <name>`.
                tokens.len() >= 3
                    && tokens[tokens.len() - 2].eq_ignore_ascii_case("as")
                    && tokens[tokens.len() - 1].eq_ignore_ascii_case(target)
            }
            None => true,
        })
        // Later stages shadow earlier ones, so keep the last match (same as
        // the previous `rfind`).
        .last()
        .and_then(|tokens| {
            tokens
                .iter()
                .skip(1)
                // Skip option tokens such as `--platform=linux/amd64`.
                .find(|token| !token.starts_with("--"))
                .map(|image| image.to_string())
        })
}
2348
2349// Container user things
2350// This should come from spec - see the docs
2351fn get_remote_user_from_config(
2352    docker_config: &DockerInspect,
2353    devcontainer: &DevContainerManifest,
2354) -> Result<String, DevContainerError> {
2355    if let DevContainer {
2356        remote_user: Some(user),
2357        ..
2358    } = &devcontainer.dev_container()
2359    {
2360        return Ok(user.clone());
2361    }
2362    if let Some(metadata) = &docker_config.config.labels.metadata {
2363        for metadatum in metadata {
2364            if let Some(remote_user) = metadatum.get("remoteUser") {
2365                if let Some(remote_user_str) = remote_user.as_str() {
2366                    return Ok(remote_user_str.to_string());
2367                }
2368            }
2369        }
2370    }
2371    if let Some(image_user) = &docker_config.config.image_user {
2372        if !image_user.is_empty() {
2373            return Ok(image_user.to_string());
2374        }
2375    }
2376    Ok("root".to_string())
2377}
2378
2379// This should come from spec - see the docs
2380fn get_container_user_from_config(
2381    docker_config: &DockerInspect,
2382    devcontainer: &DevContainerManifest,
2383) -> Result<String, DevContainerError> {
2384    if let Some(user) = &devcontainer.dev_container().container_user {
2385        return Ok(user.to_string());
2386    }
2387    if let Some(metadata) = &docker_config.config.labels.metadata {
2388        for metadatum in metadata {
2389            if let Some(container_user) = metadatum.get("containerUser") {
2390                if let Some(container_user_str) = container_user.as_str() {
2391                    return Ok(container_user_str.to_string());
2392                }
2393            }
2394        }
2395    }
2396    if let Some(image_user) = &docker_config.config.image_user {
2397        return Ok(image_user.to_string());
2398    }
2399
2400    Ok("root".to_string())
2401}
2402
2403#[cfg(test)]
2404mod test {
2405    use std::{
2406        collections::HashMap,
2407        ffi::OsStr,
2408        path::PathBuf,
2409        process::{ExitStatus, Output},
2410        sync::{Arc, Mutex},
2411    };
2412
2413    use async_trait::async_trait;
2414    use fs::{FakeFs, Fs};
2415    use gpui::{AppContext, TestAppContext};
2416    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2417    use project::{
2418        ProjectEnvironment,
2419        worktree_store::{WorktreeIdCounter, WorktreeStore},
2420    };
2421    use serde_json_lenient::Value;
2422    use util::{command::Command, paths::SanitizedPath};
2423
2424    #[cfg(not(target_os = "windows"))]
2425    use crate::docker::DockerComposeServicePort;
2426    use crate::{
2427        DevContainerConfig, DevContainerContext,
2428        command_json::CommandRunner,
2429        devcontainer_api::DevContainerError,
2430        devcontainer_json::MountDefinition,
2431        devcontainer_manifest::{
2432            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2433            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2434            image_from_dockerfile,
2435        },
2436        docker::{
2437            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2438            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2439            DockerPs,
2440        },
2441        oci::TokenResponse,
2442    };
    /// Absolute path used as the fake local project root throughout the tests.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2444
    /// Builds an in-memory GNU tar archive from `(path, contents)` pairs.
    ///
    /// An entry with empty contents is written as a directory entry; anything
    /// else becomes a regular file. All entries are created with mode 0o755.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Empty contents marks a directory entry.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finalize the archive and unwrap down to the raw backing bytes.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2475
2476    fn test_project_filename() -> String {
2477        PathBuf::from(TEST_PROJECT_PATH)
2478            .file_name()
2479            .expect("is valid")
2480            .display()
2481            .to_string()
2482    }
2483
2484    async fn init_devcontainer_config(
2485        fs: &Arc<FakeFs>,
2486        devcontainer_contents: &str,
2487    ) -> DevContainerConfig {
2488        fs.insert_tree(
2489            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2490            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2491        )
2492        .await;
2493
2494        DevContainerConfig::default_config()
2495    }
2496
    /// The fake collaborators handed to `init_devcontainer_manifest`, returned
    /// to tests so they can seed or inspect the fakes after setup.
    struct TestDependencies {
        // Fake filesystem backing the project and .devcontainer tree.
        fs: Arc<FakeFs>,
        // Kept alive but not read directly by tests (hence the underscore).
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2503
2504    async fn init_default_devcontainer_manifest(
2505        cx: &mut TestAppContext,
2506        devcontainer_contents: &str,
2507    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2508        let fs = FakeFs::new(cx.executor());
2509        let http_client = fake_http_client();
2510        let command_runner = Arc::new(TestCommandRunner::new());
2511        let docker = Arc::new(FakeDocker::new());
2512        let environment = HashMap::new();
2513
2514        init_devcontainer_manifest(
2515            cx,
2516            fs,
2517            http_client,
2518            docker,
2519            command_runner,
2520            environment,
2521            devcontainer_contents,
2522        )
2523        .await
2524    }
2525
    /// Builds a [`DevContainerManifest`] wired to the supplied fakes.
    ///
    /// Writes `devcontainer_contents` under `TEST_PROJECT_PATH/.devcontainer`,
    /// assembles a [`DevContainerContext`] around a local worktree store and
    /// project environment, and returns the fakes alongside the manifest so
    /// tests can inspect them afterwards.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // A local worktree store + project environment back the context.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Hand clones of the fakes back to the caller before the manifest
        // takes ownership of them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2568
    /// `remoteUser` from devcontainer.json ("root") must take precedence over
    /// the `remoteUser` recorded in the image metadata label ("vsCode").
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Metadata label that should LOSE to the explicit config value.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2608
    /// With no `remoteUser` in devcontainer.json, the user must be taken from
    /// the image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2637
    /// `extract_feature_id` must return the last path component of a feature
    /// reference, stripped of any `:version` or `@digest` suffix, for both
    /// OCI references and local paths.
    #[test]
    fn should_extract_feature_id_from_references() {
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
            "aws-cli"
        );
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/go"),
            "go"
        );
        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
            "rust"
        );
    }
2655
2656    #[gpui::test]
2657    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2658        let mut metadata = HashMap::new();
2659        metadata.insert(
2660            "remoteUser".to_string(),
2661            serde_json_lenient::Value::String("vsCode".to_string()),
2662        );
2663
2664        let (_, devcontainer_manifest) =
2665            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2666        let build_resources = DockerBuildResources {
2667            image: DockerInspect {
2668                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2669                config: DockerInspectConfig {
2670                    labels: DockerConfigLabels { metadata: None },
2671                    image_user: None,
2672                    env: Vec::new(),
2673                },
2674                mounts: None,
2675                state: None,
2676            },
2677            additional_mounts: vec![],
2678            privileged: false,
2679            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
2680        };
2681        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2682
2683        assert!(docker_run_command.is_ok());
2684        let docker_run_command = docker_run_command.expect("ok");
2685
2686        assert_eq!(docker_run_command.get_program(), "docker");
2687        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2688            .join(".devcontainer")
2689            .join("devcontainer.json");
2690        let expected_config_file_label = expected_config_file_label.display();
2691        assert_eq!(
2692            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2693            vec![
2694                OsStr::new("run"),
2695                OsStr::new("--sig-proxy=false"),
2696                OsStr::new("-d"),
2697                OsStr::new("--mount"),
2698                OsStr::new(
2699                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2700                ),
2701                OsStr::new("-l"),
2702                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2703                OsStr::new("-l"),
2704                OsStr::new(&format!(
2705                    "devcontainer.config_file={expected_config_file_label}"
2706                )),
2707                OsStr::new("--entrypoint"),
2708                OsStr::new("/bin/sh"),
2709                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2710                OsStr::new("-c"),
2711                OsStr::new(
2712                    "
2713    echo Container started
2714    trap \"exit 0\" 15
2715    exec \"$@\"
2716    while sleep 1 & wait $!; do :; done
2717                        "
2718                    .trim()
2719                ),
2720                OsStr::new("-"),
2721            ]
2722        )
2723    }
2724
    /// `find_primary_service` should error when no service can be determined
    /// (none configured, or the configured one is missing from the compose
    /// config), and return the matching service when it exists.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2784
    /// Non-remote variable substitution with the implicit workspace mount:
    /// `${devcontainerId}`, the workspace-folder variables (resolving to the
    /// default `/workspaces/<project>`), and `${localEnv:…}` lookups against
    /// the supplied environment map must all be replaced.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
                    "#;
        // The local environment supplies the values for the two localEnv vars.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2897
    /// Non-remote variable substitution when `workspaceMount`/`workspaceFolder`
    /// are set explicitly: the workspace-folder variables must resolve against
    /// the configured `/workspace/customfolder` instead of the default mount.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
                // These are some external comments. serde_lenient should handle them
                {
                    // These are some internal comments
                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
                    "name": "myDevContainer-${devcontainerId}",
                    "remoteUser": "root",
                    "remoteEnv": {
                        "DEVCONTAINER_ID": "${devcontainerId}",
                        "MYVAR2": "myvarothervalue",
                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

                    },
                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
                    "workspaceFolder": "/workspace/customfolder"
                }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2984
2985    // updateRemoteUserUID is treated as false in Windows, so this test will fail
2986    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2987    #[cfg(not(target_os = "windows"))]
2988    #[gpui::test]
2989    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2990        cx.executor().allow_parking();
2991        env_logger::try_init().ok();
2992        let given_devcontainer_contents = r#"
2993            /*---------------------------------------------------------------------------------------------
2994             *  Copyright (c) Microsoft Corporation. All rights reserved.
2995             *  Licensed under the MIT License. See License.txt in the project root for license information.
2996             *--------------------------------------------------------------------------------------------*/
2997            {
2998              "name": "cli-${devcontainerId}",
2999              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
3000              "build": {
3001                "dockerfile": "Dockerfile",
3002                "args": {
3003                  "VARIANT": "18-bookworm",
3004                  "FOO": "bar",
3005                },
3006              },
3007              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
3008              "workspaceFolder": "/workspace2",
3009              "mounts": [
3010                // Keep command history across instances
3011                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
3012              ],
3013
3014              "forwardPorts": [
3015                8082,
3016                8083,
3017              ],
3018              "appPort": [
3019                8084,
3020                "8085:8086",
3021              ],
3022
3023              "containerEnv": {
3024                "VARIABLE_VALUE": "value",
3025              },
3026
3027              "initializeCommand": "touch IAM.md",
3028
3029              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3030
3031              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3032
3033              "postCreateCommand": {
3034                "yarn": "yarn install",
3035                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3036              },
3037
3038              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3039
3040              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3041
3042              "remoteUser": "node",
3043
3044              "remoteEnv": {
3045                "PATH": "${containerEnv:PATH}:/some/other/path",
3046                "OTHER_ENV": "other_env_value"
3047              },
3048
3049              "features": {
3050                "ghcr.io/devcontainers/features/docker-in-docker:2": {
3051                  "moby": false,
3052                },
3053                "ghcr.io/devcontainers/features/go:1": {},
3054              },
3055
3056              "customizations": {
3057                "vscode": {
3058                  "extensions": [
3059                    "dbaeumer.vscode-eslint",
3060                    "GitHub.vscode-pull-request-github",
3061                  ],
3062                },
3063                "zed": {
3064                  "extensions": ["vue", "ruby"],
3065                },
3066                "codespaces": {
3067                  "repositories": {
3068                    "devcontainers/features": {
3069                      "permissions": {
3070                        "contents": "write",
3071                        "workflows": "write",
3072                      },
3073                    },
3074                  },
3075                },
3076              },
3077            }
3078            "#;
3079
3080        let (test_dependencies, mut devcontainer_manifest) =
3081            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3082                .await
3083                .unwrap();
3084
3085        test_dependencies
3086            .fs
3087            .atomic_write(
3088                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3089                r#"
3090#  Copyright (c) Microsoft Corporation. All rights reserved.
3091#  Licensed under the MIT License. See License.txt in the project root for license information.
3092ARG VARIANT="16-bullseye"
3093FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3094
3095RUN mkdir -p /workspaces && chown node:node /workspaces
3096
3097ARG USERNAME=node
3098USER $USERNAME
3099
3100# Save command line history
3101RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3102&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3103&& mkdir -p /home/$USERNAME/commandhistory \
3104&& touch /home/$USERNAME/commandhistory/.bash_history \
3105&& chown -R $USERNAME /home/$USERNAME/commandhistory
3106                    "#.trim().to_string(),
3107            )
3108            .await
3109            .unwrap();
3110
3111        devcontainer_manifest.parse_nonremote_vars().unwrap();
3112
3113        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3114
3115        assert_eq!(
3116            devcontainer_up.extension_ids,
3117            vec!["vue".to_string(), "ruby".to_string()]
3118        );
3119
3120        let files = test_dependencies.fs.files();
3121        let feature_dockerfile = files
3122            .iter()
3123            .find(|f| {
3124                f.file_name()
3125                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3126            })
3127            .expect("to be found");
3128        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3129        assert_eq!(
3130            &feature_dockerfile,
3131            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3132
3133#  Copyright (c) Microsoft Corporation. All rights reserved.
3134#  Licensed under the MIT License. See License.txt in the project root for license information.
3135ARG VARIANT="16-bullseye"
3136FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3137
3138RUN mkdir -p /workspaces && chown node:node /workspaces
3139
3140ARG USERNAME=node
3141USER $USERNAME
3142
3143# Save command line history
3144RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3145&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3146&& mkdir -p /home/$USERNAME/commandhistory \
3147&& touch /home/$USERNAME/commandhistory/.bash_history \
3148&& chown -R $USERNAME /home/$USERNAME/commandhistory
3149FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3150
3151FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3152USER root
3153COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3154RUN chmod -R 0755 /tmp/build-features/
3155
3156FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3157
3158USER root
3159
3160RUN mkdir -p /tmp/dev-container-features
3161COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3162
3163RUN \
3164echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3165echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3166
3167
3168RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3169cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3170&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3171&& cd /tmp/dev-container-features/docker-in-docker_0 \
3172&& chmod +x ./devcontainer-features-install.sh \
3173&& ./devcontainer-features-install.sh \
3174&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3175
3176RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3177cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3178&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3179&& cd /tmp/dev-container-features/go_1 \
3180&& chmod +x ./devcontainer-features-install.sh \
3181&& ./devcontainer-features-install.sh \
3182&& rm -rf /tmp/dev-container-features/go_1
3183
3184
3185ARG _DEV_CONTAINERS_IMAGE_USER=root
3186USER $_DEV_CONTAINERS_IMAGE_USER
3187"#
3188        );
3189
3190        let uid_dockerfile = files
3191            .iter()
3192            .find(|f| {
3193                f.file_name()
3194                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3195            })
3196            .expect("to be found");
3197        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3198
3199        assert_eq!(
3200            &uid_dockerfile,
3201            r#"ARG BASE_IMAGE
3202FROM $BASE_IMAGE
3203
3204USER root
3205
3206ARG REMOTE_USER
3207ARG NEW_UID
3208ARG NEW_GID
3209SHELL ["/bin/sh", "-c"]
3210RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3211	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3212	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3213	if [ -z "$OLD_UID" ]; then \
3214		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3215	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3216		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3217	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3218		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3219	else \
3220		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3221			FREE_GID=65532; \
3222			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3223			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3224			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3225		fi; \
3226		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3227		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3228		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3229			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3230		fi; \
3231		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3232	fi;
3233
3234ARG IMAGE_USER
3235USER $IMAGE_USER
3236
3237# Ensure that /etc/profile does not clobber the existing path
3238RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3239
3240ENV DOCKER_BUILDKIT=1
3241
3242ENV GOPATH=/go
3243ENV GOROOT=/usr/local/go
3244ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3245ENV VARIABLE_VALUE=value
3246"#
3247        );
3248
3249        let golang_install_wrapper = files
3250            .iter()
3251            .find(|f| {
3252                f.file_name()
3253                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3254                    && f.to_str().is_some_and(|s| s.contains("/go_"))
3255            })
3256            .expect("to be found");
3257        let golang_install_wrapper = test_dependencies
3258            .fs
3259            .load(golang_install_wrapper)
3260            .await
3261            .unwrap();
3262        assert_eq!(
3263            &golang_install_wrapper,
3264            r#"#!/bin/sh
3265set -e
3266
3267on_exit () {
3268    [ $? -eq 0 ] && exit
3269    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3270}
3271
3272trap on_exit EXIT
3273
3274echo ===========================================================================
3275echo 'Feature       : go'
3276echo 'Id            : ghcr.io/devcontainers/features/go:1'
3277echo 'Options       :'
3278echo '    GOLANGCILINTVERSION=latest
3279    VERSION=latest'
3280echo ===========================================================================
3281
3282set -a
3283. ../devcontainer-features.builtin.env
3284. ./devcontainer-features.env
3285set +a
3286
3287chmod +x ./install.sh
3288./install.sh
3289"#
3290        );
3291
3292        let docker_commands = test_dependencies
3293            .command_runner
3294            .commands_by_program("docker");
3295
3296        let docker_run_command = docker_commands
3297            .iter()
3298            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3299            .expect("found");
3300
3301        assert_eq!(
3302            docker_run_command.args,
3303            vec![
3304                "run".to_string(),
3305                "--privileged".to_string(),
3306                "--sig-proxy=false".to_string(),
3307                "-d".to_string(),
3308                "--mount".to_string(),
3309                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3310                "--mount".to_string(),
3311                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3312                "--mount".to_string(),
3313                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3314                "-l".to_string(),
3315                "devcontainer.local_folder=/path/to/local/project".to_string(),
3316                "-l".to_string(),
3317                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3318                "-l".to_string(),
3319                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3320                "-p".to_string(),
3321                "8082:8082".to_string(),
3322                "-p".to_string(),
3323                "8083:8083".to_string(),
3324                "-p".to_string(),
3325                "8084:8084".to_string(),
3326                "-p".to_string(),
3327                "8085:8086".to_string(),
3328                "--entrypoint".to_string(),
3329                "/bin/sh".to_string(),
3330                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3331                "-c".to_string(),
3332                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3333                "-".to_string()
3334            ]
3335        );
3336
3337        let docker_exec_commands = test_dependencies
3338            .docker
3339            .exec_commands_recorded
3340            .lock()
3341            .unwrap();
3342
3343        assert!(docker_exec_commands.iter().all(|exec| {
3344            exec.env
3345                == HashMap::from([
3346                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3347                    (
3348                        "PATH".to_string(),
3349                        "/initial/path:/some/other/path".to_string(),
3350                    ),
3351                ])
3352        }))
3353    }
3354
    // updateRemoteUserUID is treated as false on Windows, so this test would fail there.
    // The Windows-compatible path is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture: a devcontainer.json that uses Docker Compose (service "app"),
        // pulls in two OCI features, and forwards ports both on the primary
        // service (8083) and on a sibling service ("db:5432", "db:1234").
        // Intentionally includes JSONC comments and trailing commas, which the
        // lenient deserializer must accept.
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the docker-compose.yml the devcontainer.json references: an
        // "app" service built from the local Dockerfile (sharing the network of
        // "db" via network_mode) and a "db" postgres service with a named volume.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Write the Dockerfile the compose "app" service builds from; its
        // contents are expected to be spliced verbatim into Dockerfile.extended.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variables (e.g. ${localWorkspaceFolderBasename})
        // before building; must succeed on this fixture.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full build-and-run pipeline; the assertions below check
        // the files it generated rather than its return value.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Golden-file check: the generated Dockerfile.extended must contain the
        // original Dockerfile verbatim, followed by the auto-added stage label
        // and the feature-install stages (aws-cli_0, docker-in-docker_1) in
        // declaration order, with the remote user resolved to 'vscode'.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Golden-file check: updateUID.Dockerfile remaps the remote user's
        // UID/GID to the host's (updateRemoteUserUID defaults to true here).
        // NOTE: the expected text below contains literal tab indentation inside
        // the RUN script — it must stay byte-exact.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The generated compose *build* override must keep the original build
        // context ("." relative to the compose file), not rewrite it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The compose *runtime* override must: keep the "app" service alive via
        // the sleep/trap entrypoint, attach devcontainer labels/metadata, add
        // the docker-in-docker volume, and — because "app" shares "db"'s
        // network — publish all forwarded ports (8083, 5432, 1234) on "db".
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        // Compare structurally (after lenient-JSON parse) rather than as raw
        // text, so key ordering in the generated file does not matter.
        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3676
3677    #[gpui::test]
3678    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3679        cx: &mut TestAppContext,
3680    ) {
3681        cx.executor().allow_parking();
3682        env_logger::try_init().ok();
3683        let given_devcontainer_contents = r#"
3684        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3685        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3686        {
3687          "features": {
3688            "ghcr.io/devcontainers/features/aws-cli:1": {},
3689            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3690          },
3691          "name": "Rust and PostgreSQL",
3692          "dockerComposeFile": "docker-compose.yml",
3693          "service": "app",
3694          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3695
3696          // Features to add to the dev container. More info: https://containers.dev/features.
3697          // "features": {},
3698
3699          // Use 'forwardPorts' to make a list of ports inside the container available locally.
3700          "forwardPorts": [
3701            8083,
3702            "db:5432",
3703            "db:1234",
3704          ],
3705          "updateRemoteUserUID": false,
3706          "appPort": "8084",
3707
3708          // Use 'postCreateCommand' to run commands after the container is created.
3709          // "postCreateCommand": "rustc --version",
3710
3711          // Configure tool-specific properties.
3712          // "customizations": {},
3713
3714          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3715          // "remoteUser": "root"
3716        }
3717        "#;
3718        let (test_dependencies, mut devcontainer_manifest) =
3719            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3720                .await
3721                .unwrap();
3722
3723        test_dependencies
3724        .fs
3725        .atomic_write(
3726            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3727            r#"
3728version: '3.8'
3729
3730volumes:
3731postgres-data:
3732
3733services:
3734app:
3735    build:
3736        context: .
3737        dockerfile: Dockerfile
3738    env_file:
3739        # Ensure that the variables in .env match the same variables in devcontainer.json
3740        - .env
3741
3742    volumes:
3743        - ../..:/workspaces:cached
3744
3745    # Overrides default command so things don't shut down after the process ends.
3746    command: sleep infinity
3747
3748    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3749    network_mode: service:db
3750
3751    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3752    # (Adding the "ports" property to this file will not forward from a Codespace.)
3753
3754db:
3755    image: postgres:14.1
3756    restart: unless-stopped
3757    volumes:
3758        - postgres-data:/var/lib/postgresql/data
3759    env_file:
3760        # Ensure that the variables in .env match the same variables in devcontainer.json
3761        - .env
3762
3763    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3764    # (Adding the "ports" property to this file will not forward from a Codespace.)
3765                "#.trim().to_string(),
3766        )
3767        .await
3768        .unwrap();
3769
3770        test_dependencies.fs.atomic_write(
3771        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3772        r#"
3773FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3774
3775# Include lld linker to improve build times either by using environment variable
3776# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3777RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3778&& apt-get -y install clang lld \
3779&& apt-get autoremove -y && apt-get clean -y
3780        "#.trim().to_string()).await.unwrap();
3781
3782        devcontainer_manifest.parse_nonremote_vars().unwrap();
3783
3784        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3785
3786        let files = test_dependencies.fs.files();
3787        let feature_dockerfile = files
3788            .iter()
3789            .find(|f| {
3790                f.file_name()
3791                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3792            })
3793            .expect("to be found");
3794        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3795        assert_eq!(
3796            &feature_dockerfile,
3797            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3798
3799FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3800
3801# Include lld linker to improve build times either by using environment variable
3802# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3803RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3804&& apt-get -y install clang lld \
3805&& apt-get autoremove -y && apt-get clean -y
3806FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3807
3808FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3809USER root
3810COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3811RUN chmod -R 0755 /tmp/build-features/
3812
3813FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3814
3815USER root
3816
3817RUN mkdir -p /tmp/dev-container-features
3818COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3819
3820RUN \
3821echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3822echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3823
3824
3825RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3826cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3827&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3828&& cd /tmp/dev-container-features/aws-cli_0 \
3829&& chmod +x ./devcontainer-features-install.sh \
3830&& ./devcontainer-features-install.sh \
3831&& rm -rf /tmp/dev-container-features/aws-cli_0
3832
3833RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3834cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3835&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3836&& cd /tmp/dev-container-features/docker-in-docker_1 \
3837&& chmod +x ./devcontainer-features-install.sh \
3838&& ./devcontainer-features-install.sh \
3839&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3840
3841
3842ARG _DEV_CONTAINERS_IMAGE_USER=root
3843USER $_DEV_CONTAINERS_IMAGE_USER
3844
3845# Ensure that /etc/profile does not clobber the existing path
3846RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3847
3848
3849ENV DOCKER_BUILDKIT=1
3850"#
3851        );
3852    }
3853
3854    #[cfg(not(target_os = "windows"))]
3855    #[gpui::test]
3856    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
3857        cx.executor().allow_parking();
3858        env_logger::try_init().ok();
3859        let given_devcontainer_contents = r#"
3860        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3861        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3862        {
3863          "features": {
3864            "ghcr.io/devcontainers/features/aws-cli:1": {},
3865            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3866          },
3867          "name": "Rust and PostgreSQL",
3868          "dockerComposeFile": "docker-compose.yml",
3869          "service": "app",
3870          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3871
3872          // Features to add to the dev container. More info: https://containers.dev/features.
3873          // "features": {},
3874
3875          // Use 'forwardPorts' to make a list of ports inside the container available locally.
3876          // "forwardPorts": [5432],
3877
3878          // Use 'postCreateCommand' to run commands after the container is created.
3879          // "postCreateCommand": "rustc --version",
3880
3881          // Configure tool-specific properties.
3882          // "customizations": {},
3883
3884          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3885          // "remoteUser": "root"
3886        }
3887        "#;
3888        let mut fake_docker = FakeDocker::new();
3889        fake_docker.set_podman(true);
3890        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
3891            cx,
3892            FakeFs::new(cx.executor()),
3893            fake_http_client(),
3894            Arc::new(fake_docker),
3895            Arc::new(TestCommandRunner::new()),
3896            HashMap::new(),
3897            given_devcontainer_contents,
3898        )
3899        .await
3900        .unwrap();
3901
3902        test_dependencies
3903        .fs
3904        .atomic_write(
3905            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3906            r#"
3907version: '3.8'
3908
3909volumes:
3910postgres-data:
3911
3912services:
3913app:
3914build:
3915    context: .
3916    dockerfile: Dockerfile
3917env_file:
3918    # Ensure that the variables in .env match the same variables in devcontainer.json
3919    - .env
3920
3921volumes:
3922    - ../..:/workspaces:cached
3923
3924# Overrides default command so things don't shut down after the process ends.
3925command: sleep infinity
3926
3927# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3928network_mode: service:db
3929
3930# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3931# (Adding the "ports" property to this file will not forward from a Codespace.)
3932
3933db:
3934image: postgres:14.1
3935restart: unless-stopped
3936volumes:
3937    - postgres-data:/var/lib/postgresql/data
3938env_file:
3939    # Ensure that the variables in .env match the same variables in devcontainer.json
3940    - .env
3941
3942# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3943# (Adding the "ports" property to this file will not forward from a Codespace.)
3944                "#.trim().to_string(),
3945        )
3946        .await
3947        .unwrap();
3948
3949        test_dependencies.fs.atomic_write(
3950        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3951        r#"
3952FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3953
3954# Include lld linker to improve build times either by using environment variable
3955# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3956RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3957&& apt-get -y install clang lld \
3958&& apt-get autoremove -y && apt-get clean -y
3959        "#.trim().to_string()).await.unwrap();
3960
3961        devcontainer_manifest.parse_nonremote_vars().unwrap();
3962
3963        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3964
3965        let files = test_dependencies.fs.files();
3966
3967        let feature_dockerfile = files
3968            .iter()
3969            .find(|f| {
3970                f.file_name()
3971                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3972            })
3973            .expect("to be found");
3974        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3975        assert_eq!(
3976            &feature_dockerfile,
3977            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3978
3979FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3980
3981# Include lld linker to improve build times either by using environment variable
3982# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3983RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3984&& apt-get -y install clang lld \
3985&& apt-get autoremove -y && apt-get clean -y
3986FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3987
3988FROM dev_container_feature_content_temp as dev_containers_feature_content_source
3989
3990FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3991USER root
3992COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
3993RUN chmod -R 0755 /tmp/build-features/
3994
3995FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3996
3997USER root
3998
3999RUN mkdir -p /tmp/dev-container-features
4000COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4001
4002RUN \
4003echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4004echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4005
4006
4007COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
4008RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
4009&& cd /tmp/dev-container-features/aws-cli_0 \
4010&& chmod +x ./devcontainer-features-install.sh \
4011&& ./devcontainer-features-install.sh
4012
4013COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
4014RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
4015&& cd /tmp/dev-container-features/docker-in-docker_1 \
4016&& chmod +x ./devcontainer-features-install.sh \
4017&& ./devcontainer-features-install.sh
4018
4019
4020ARG _DEV_CONTAINERS_IMAGE_USER=root
4021USER $_DEV_CONTAINERS_IMAGE_USER
4022"#
4023        );
4024
4025        let uid_dockerfile = files
4026            .iter()
4027            .find(|f| {
4028                f.file_name()
4029                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
4030            })
4031            .expect("to be found");
4032        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
4033
4034        assert_eq!(
4035            &uid_dockerfile,
4036            r#"ARG BASE_IMAGE
4037FROM $BASE_IMAGE
4038
4039USER root
4040
4041ARG REMOTE_USER
4042ARG NEW_UID
4043ARG NEW_GID
4044SHELL ["/bin/sh", "-c"]
4045RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
4046	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
4047	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
4048	if [ -z "$OLD_UID" ]; then \
4049		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
4050	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
4051		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
4052	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
4053		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
4054	else \
4055		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
4056			FREE_GID=65532; \
4057			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
4058			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
4059			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
4060		fi; \
4061		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4062		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4063		if [ "$OLD_GID" != "$NEW_GID" ]; then \
4064			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4065		fi; \
4066		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4067	fi;
4068
4069ARG IMAGE_USER
4070USER $IMAGE_USER
4071
4072# Ensure that /etc/profile does not clobber the existing path
4073RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4074
4075
4076ENV DOCKER_BUILDKIT=1
4077"#
4078        );
4079    }
4080
4081    #[gpui::test]
4082    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4083        cx.executor().allow_parking();
4084        env_logger::try_init().ok();
4085        let given_devcontainer_contents = r#"
4086            /*---------------------------------------------------------------------------------------------
4087             *  Copyright (c) Microsoft Corporation. All rights reserved.
4088             *  Licensed under the MIT License. See License.txt in the project root for license information.
4089             *--------------------------------------------------------------------------------------------*/
4090            {
4091              "name": "cli-${devcontainerId}",
4092              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4093              "build": {
4094                "dockerfile": "Dockerfile",
4095                "args": {
4096                  "VARIANT": "18-bookworm",
4097                  "FOO": "bar",
4098                },
4099                "target": "development",
4100              },
4101              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4102              "workspaceFolder": "/workspace2",
4103              "mounts": [
4104                // Keep command history across instances
4105                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4106              ],
4107
4108              "forwardPorts": [
4109                8082,
4110                8083,
4111              ],
4112              "appPort": "8084",
4113              "updateRemoteUserUID": false,
4114
4115              "containerEnv": {
4116                "VARIABLE_VALUE": "value",
4117              },
4118
4119              "initializeCommand": "touch IAM.md",
4120
4121              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4122
4123              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4124
4125              "postCreateCommand": {
4126                "yarn": "yarn install",
4127                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4128              },
4129
4130              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4131
4132              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4133
4134              "remoteUser": "node",
4135
4136              "remoteEnv": {
4137                "PATH": "${containerEnv:PATH}:/some/other/path",
4138                "OTHER_ENV": "other_env_value"
4139              },
4140
4141              "features": {
4142                "ghcr.io/devcontainers/features/docker-in-docker:2": {
4143                  "moby": false,
4144                },
4145                "ghcr.io/devcontainers/features/go:1": {},
4146              },
4147
4148              "customizations": {
4149                "vscode": {
4150                  "extensions": [
4151                    "dbaeumer.vscode-eslint",
4152                    "GitHub.vscode-pull-request-github",
4153                  ],
4154                },
4155                "zed": {
4156                  "extensions": ["vue", "ruby"],
4157                },
4158                "codespaces": {
4159                  "repositories": {
4160                    "devcontainers/features": {
4161                      "permissions": {
4162                        "contents": "write",
4163                        "workflows": "write",
4164                      },
4165                    },
4166                  },
4167                },
4168              },
4169            }
4170            "#;
4171
4172        let (test_dependencies, mut devcontainer_manifest) =
4173            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4174                .await
4175                .unwrap();
4176
4177        test_dependencies
4178            .fs
4179            .atomic_write(
4180                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4181                r#"
4182#  Copyright (c) Microsoft Corporation. All rights reserved.
4183#  Licensed under the MIT License. See License.txt in the project root for license information.
4184ARG VARIANT="16-bullseye"
4185FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4186FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4187
4188RUN mkdir -p /workspaces && chown node:node /workspaces
4189
4190ARG USERNAME=node
4191USER $USERNAME
4192
4193# Save command line history
4194RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4195&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4196&& mkdir -p /home/$USERNAME/commandhistory \
4197&& touch /home/$USERNAME/commandhistory/.bash_history \
4198&& chown -R $USERNAME /home/$USERNAME/commandhistory
4199                    "#.trim().to_string(),
4200            )
4201            .await
4202            .unwrap();
4203
4204        devcontainer_manifest.parse_nonremote_vars().unwrap();
4205
4206        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4207
4208        assert_eq!(
4209            devcontainer_up.extension_ids,
4210            vec!["vue".to_string(), "ruby".to_string()]
4211        );
4212
4213        let files = test_dependencies.fs.files();
4214        let feature_dockerfile = files
4215            .iter()
4216            .find(|f| {
4217                f.file_name()
4218                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4219            })
4220            .expect("to be found");
4221        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4222        assert_eq!(
4223            &feature_dockerfile,
4224            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4225
4226#  Copyright (c) Microsoft Corporation. All rights reserved.
4227#  Licensed under the MIT License. See License.txt in the project root for license information.
4228ARG VARIANT="16-bullseye"
4229FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4230FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4231
4232RUN mkdir -p /workspaces && chown node:node /workspaces
4233
4234ARG USERNAME=node
4235USER $USERNAME
4236
4237# Save command line history
4238RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4239&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4240&& mkdir -p /home/$USERNAME/commandhistory \
4241&& touch /home/$USERNAME/commandhistory/.bash_history \
4242&& chown -R $USERNAME /home/$USERNAME/commandhistory
4243FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4244
4245FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4246USER root
4247COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4248RUN chmod -R 0755 /tmp/build-features/
4249
4250FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4251
4252USER root
4253
4254RUN mkdir -p /tmp/dev-container-features
4255COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4256
4257RUN \
4258echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4259echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4260
4261
4262RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4263cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4264&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4265&& cd /tmp/dev-container-features/docker-in-docker_0 \
4266&& chmod +x ./devcontainer-features-install.sh \
4267&& ./devcontainer-features-install.sh \
4268&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4269
4270RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4271cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4272&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4273&& cd /tmp/dev-container-features/go_1 \
4274&& chmod +x ./devcontainer-features-install.sh \
4275&& ./devcontainer-features-install.sh \
4276&& rm -rf /tmp/dev-container-features/go_1
4277
4278
4279ARG _DEV_CONTAINERS_IMAGE_USER=root
4280USER $_DEV_CONTAINERS_IMAGE_USER
4281
4282# Ensure that /etc/profile does not clobber the existing path
4283RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4284
4285ENV DOCKER_BUILDKIT=1
4286
4287ENV GOPATH=/go
4288ENV GOROOT=/usr/local/go
4289ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4290ENV VARIABLE_VALUE=value
4291"#
4292        );
4293
4294        let golang_install_wrapper = files
4295            .iter()
4296            .find(|f| {
4297                f.file_name()
4298                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4299                    && f.to_str().is_some_and(|s| s.contains("go_"))
4300            })
4301            .expect("to be found");
4302        let golang_install_wrapper = test_dependencies
4303            .fs
4304            .load(golang_install_wrapper)
4305            .await
4306            .unwrap();
4307        assert_eq!(
4308            &golang_install_wrapper,
4309            r#"#!/bin/sh
4310set -e
4311
4312on_exit () {
4313    [ $? -eq 0 ] && exit
4314    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4315}
4316
4317trap on_exit EXIT
4318
4319echo ===========================================================================
4320echo 'Feature       : go'
4321echo 'Id            : ghcr.io/devcontainers/features/go:1'
4322echo 'Options       :'
4323echo '    GOLANGCILINTVERSION=latest
4324    VERSION=latest'
4325echo ===========================================================================
4326
4327set -a
4328. ../devcontainer-features.builtin.env
4329. ./devcontainer-features.env
4330set +a
4331
4332chmod +x ./install.sh
4333./install.sh
4334"#
4335        );
4336
4337        let docker_commands = test_dependencies
4338            .command_runner
4339            .commands_by_program("docker");
4340
4341        let docker_run_command = docker_commands
4342            .iter()
4343            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4344
4345        assert!(docker_run_command.is_some());
4346
4347        let docker_exec_commands = test_dependencies
4348            .docker
4349            .exec_commands_recorded
4350            .lock()
4351            .unwrap();
4352
4353        assert!(docker_exec_commands.iter().all(|exec| {
4354            exec.env
4355                == HashMap::from([
4356                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4357                    (
4358                        "PATH".to_string(),
4359                        "/initial/path:/some/other/path".to_string(),
4360                    ),
4361                ])
4362        }))
4363    }
4364
4365    #[cfg(not(target_os = "windows"))]
4366    #[gpui::test]
4367    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4368        cx.executor().allow_parking();
4369        env_logger::try_init().ok();
4370        let given_devcontainer_contents = r#"
4371            {
4372              "name": "cli-${devcontainerId}",
4373              "image": "test_image:latest",
4374            }
4375            "#;
4376
4377        let (test_dependencies, mut devcontainer_manifest) =
4378            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4379                .await
4380                .unwrap();
4381
4382        devcontainer_manifest.parse_nonremote_vars().unwrap();
4383
4384        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4385
4386        let files = test_dependencies.fs.files();
4387        let uid_dockerfile = files
4388            .iter()
4389            .find(|f| {
4390                f.file_name()
4391                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
4392            })
4393            .expect("to be found");
4394        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
4395
4396        assert_eq!(
4397            &uid_dockerfile,
4398            r#"ARG BASE_IMAGE
4399FROM $BASE_IMAGE
4400
4401USER root
4402
4403ARG REMOTE_USER
4404ARG NEW_UID
4405ARG NEW_GID
4406SHELL ["/bin/sh", "-c"]
4407RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
4408	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
4409	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
4410	if [ -z "$OLD_UID" ]; then \
4411		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
4412	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
4413		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
4414	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
4415		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
4416	else \
4417		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
4418			FREE_GID=65532; \
4419			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
4420			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
4421			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
4422		fi; \
4423		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4424		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4425		if [ "$OLD_GID" != "$NEW_GID" ]; then \
4426			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4427		fi; \
4428		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4429	fi;
4430
4431ARG IMAGE_USER
4432USER $IMAGE_USER
4433
4434# Ensure that /etc/profile does not clobber the existing path
4435RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4436"#
4437        );
4438    }
4439
4440    #[cfg(not(target_os = "windows"))]
4441    #[gpui::test]
4442    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
4443        cx.executor().allow_parking();
4444        env_logger::try_init().ok();
4445        let given_devcontainer_contents = r#"
4446            {
4447              "name": "cli-${devcontainerId}",
4448              "dockerComposeFile": "docker-compose-plain.yml",
4449              "service": "app",
4450            }
4451            "#;
4452
4453        let (test_dependencies, mut devcontainer_manifest) =
4454            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4455                .await
4456                .unwrap();
4457
4458        test_dependencies
4459            .fs
4460            .atomic_write(
4461                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
4462                r#"
4463services:
4464    app:
4465        image: test_image:latest
4466        command: sleep infinity
4467        volumes:
4468            - ..:/workspace:cached
4469                "#
4470                .trim()
4471                .to_string(),
4472            )
4473            .await
4474            .unwrap();
4475
4476        devcontainer_manifest.parse_nonremote_vars().unwrap();
4477
4478        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4479
4480        let files = test_dependencies.fs.files();
4481        let uid_dockerfile = files
4482            .iter()
4483            .find(|f| {
4484                f.file_name()
4485                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
4486            })
4487            .expect("to be found");
4488        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
4489
4490        assert_eq!(
4491            &uid_dockerfile,
4492            r#"ARG BASE_IMAGE
4493FROM $BASE_IMAGE
4494
4495USER root
4496
4497ARG REMOTE_USER
4498ARG NEW_UID
4499ARG NEW_GID
4500SHELL ["/bin/sh", "-c"]
4501RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
4502	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
4503	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
4504	if [ -z "$OLD_UID" ]; then \
4505		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
4506	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
4507		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
4508	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
4509		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
4510	else \
4511		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
4512			FREE_GID=65532; \
4513			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
4514			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
4515			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
4516		fi; \
4517		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4518		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4519		if [ "$OLD_GID" != "$NEW_GID" ]; then \
4520			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4521		fi; \
4522		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4523	fi;
4524
4525ARG IMAGE_USER
4526USER $IMAGE_USER
4527
4528# Ensure that /etc/profile does not clobber the existing path
4529RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4530"#
4531        );
4532    }
4533
4534    #[gpui::test]
4535    async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4536        cx.executor().allow_parking();
4537        env_logger::try_init().ok();
4538        let given_devcontainer_contents = r#"
4539            {
4540              "name": "cli-${devcontainerId}",
4541              "build": {
4542                "dockerfile": "Dockerfile",
4543                "args": {
4544                    "VERSION": "1.22",
4545                }
4546              },
4547            }
4548            "#;
4549
4550        let (test_dependencies, mut devcontainer_manifest) =
4551            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4552                .await
4553                .unwrap();
4554
4555        test_dependencies
4556            .fs
4557            .atomic_write(
4558                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4559                r#"
4560FROM dontgrabme as build_context
4561ARG VERSION=1.21
4562ARG REPOSITORY=mybuild
4563ARG REGISTRY=docker.io/stuff
4564
4565ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4566
4567FROM ${IMAGE} AS devcontainer
4568                    "#
4569                .trim()
4570                .to_string(),
4571            )
4572            .await
4573            .unwrap();
4574
4575        devcontainer_manifest.parse_nonremote_vars().unwrap();
4576
4577        let dockerfile_contents = devcontainer_manifest
4578            .expanded_dockerfile_content()
4579            .await
4580            .unwrap();
4581        let base_image = image_from_dockerfile(
4582            dockerfile_contents,
4583            &devcontainer_manifest
4584                .dev_container()
4585                .build
4586                .as_ref()
4587                .and_then(|b| b.target.clone()),
4588        )
4589        .unwrap();
4590
4591        assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4592    }
4593
4594    #[gpui::test]
4595    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
4596        cx.executor().allow_parking();
4597        env_logger::try_init().ok();
4598        let given_devcontainer_contents = r#"
4599            {
4600              "name": "cli-${devcontainerId}",
4601              "build": {
4602                "dockerfile": "Dockerfile",
4603                "args": {
4604                    "VERSION": "1.22",
4605                },
4606                "target": "development"
4607              },
4608            }
4609            "#;
4610
4611        let (test_dependencies, mut devcontainer_manifest) =
4612            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4613                .await
4614                .unwrap();
4615
4616        test_dependencies
4617            .fs
4618            .atomic_write(
4619                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4620                r#"
4621FROM dontgrabme as build_context
4622ARG VERSION=1.21
4623ARG REPOSITORY=mybuild
4624ARG REGISTRY=docker.io/stuff
4625
4626ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4627ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest
4628
4629FROM ${DEV_IMAGE} AS development
4630FROM ${IMAGE} AS production
4631                    "#
4632                .trim()
4633                .to_string(),
4634            )
4635            .await
4636            .unwrap();
4637
4638        devcontainer_manifest.parse_nonremote_vars().unwrap();
4639
4640        let dockerfile_contents = devcontainer_manifest
4641            .expanded_dockerfile_content()
4642            .await
4643            .unwrap();
4644        let base_image = image_from_dockerfile(
4645            dockerfile_contents,
4646            &devcontainer_manifest
4647                .dev_container()
4648                .build
4649                .as_ref()
4650                .and_then(|b| b.target.clone()),
4651        )
4652        .unwrap();
4653
4654        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
4655    }
4656
4657    #[gpui::test]
4658    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
4659        cx.executor().allow_parking();
4660        env_logger::try_init().ok();
4661        let given_devcontainer_contents = r#"
4662            {
4663              "name": "cli-${devcontainerId}",
4664              "build": {
4665                "dockerfile": "Dockerfile",
4666                "args": {
4667                    "JSON_ARG": "some-value",
4668                    "ELIXIR_VERSION": "1.21",
4669                }
4670              },
4671            }
4672            "#;
4673
4674        let (test_dependencies, mut devcontainer_manifest) =
4675            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4676                .await
4677                .unwrap();
4678
4679        test_dependencies
4680            .fs
4681            .atomic_write(
4682                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4683                r#"
4684ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
4685ARG ELIXIR_VERSION=1.20.0-rc.4
4686ARG FOO=foo BAR=bar
4687ARG FOOBAR=${FOO}${BAR}
4688ARG OTP_VERSION=28.4.1
4689ARG DEBIAN_VERSION=trixie-20260316-slim
4690ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
4691ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
4692ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
4693ARG FROM_JSON=${JSON_ARG}
4694
4695FROM ${IMAGE} AS devcontainer
4696                    "#
4697                .trim()
4698                .to_string(),
4699            )
4700            .await
4701            .unwrap();
4702
4703        devcontainer_manifest.parse_nonremote_vars().unwrap();
4704
4705        let expanded_dockerfile = devcontainer_manifest
4706            .expanded_dockerfile_content()
4707            .await
4708            .unwrap();
4709
4710        assert_eq!(
4711            &expanded_dockerfile,
4712            r#"
4713ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
4714ARG ELIXIR_VERSION=1.20.0-rc.4
4715ARG FOO=foo BAR=bar
4716ARG FOOBAR=foobar
4717ARG OTP_VERSION=28.4.1
4718ARG DEBIAN_VERSION=trixie-20260316-slim
4719ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
4720ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
4721ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
4722ARG FROM_JSON=some-value
4723
4724FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
4725            "#
4726            .trim()
4727        )
4728    }
4729
    // TODO(review): empty stub — passes vacuously. Implement the aliasing
    // assertion or remove the test.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4732
    // TODO(review): empty stub — passes vacuously. Implement or remove.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4735
    // TODO(review): empty stub — passes vacuously. Implement or remove.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4738
    /// One `docker exec` invocation captured by a fake docker client so tests
    /// can assert on what would have been executed.
    ///
    /// Underscore-prefixed fields are recorded for completeness but not (yet)
    /// read by any assertion; the prefix suppresses dead-code warnings.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment map passed to the exec'd process — the field tests
        // currently inspect.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4746
    /// Test double for `DockerClient` that serves hard-coded inspect/compose
    /// fixtures and records every `docker exec` request instead of running it.
    pub(crate) struct FakeDocker {
        // Log of run_docker_exec calls; Mutex because tests exercise it from
        // async contexts through a shared reference.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true the fake reports itself as podman (different CLI name,
        // no compose buildkit support).
        podman: bool,
    }
4751
4752    impl FakeDocker {
4753        pub(crate) fn new() -> Self {
4754            Self {
4755                podman: false,
4756                exec_commands_recorded: Mutex::new(Vec::new()),
4757            }
4758        }
4759        #[cfg(not(target_os = "windows"))]
4760        fn set_podman(&mut self, podman: bool) {
4761            self.podman = podman;
4762        }
4763    }
4764
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Returns canned inspect fixtures keyed off the image/container id,
        /// so tests can exercise different container metadata without a real
        /// docker daemon. Unknown ids report `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Typescript base image: metadata label sets remoteUser=node.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Rust base image: metadata label sets remoteUser=vscode.
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Containers whose generated name starts with "cli_": includes a
            // pre-existing PATH entry in the container env.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id returned by this fake's find_process_by_filters;
            // the only fixture that reports a bind mount.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Containers whose generated name starts with "rust_a-".
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Image referenced by the "plain" docker-compose fixture below.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            // Any other id: behave as if docker is unavailable.
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Serves canned compose configs for two known compose-file paths;
        /// any other input reports `DockerNotAvailable`.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            // Two-service fixture: an "app" service built from a Dockerfile
            // with a bind mount and service networking, plus a "db" service
            // backed by a named volume.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                    target: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // Minimal fixture: a single "app" service running a prebuilt image
            // (which the inspect fixture above also knows about).
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// No-op: compose builds always "succeed" in tests.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec request (instead of running it) so tests can
        /// assert on container id, user, env, and the inner command.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Always fails, as if docker were unavailable.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Ignores the filters and always "finds" the container whose id the
        /// inspect fixture above recognizes.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        // Buildkit is reported as supported only in docker mode.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        // CLI name follows the configured personality.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
5005
    /// A program name plus argument list captured by [`TestCommandRunner`]
    /// for later assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5011
    /// `CommandRunner` test double that records every command it is asked to
    /// run instead of executing it.
    pub(crate) struct TestCommandRunner {
        // Ordered log of all commands received by run_command.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5015
5016    impl TestCommandRunner {
5017        fn new() -> Self {
5018            Self {
5019                commands_recorded: Mutex::new(Vec::new()),
5020            }
5021        }
5022
5023        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5024            let record = self.commands_recorded.lock().expect("poisoned");
5025            record
5026                .iter()
5027                .filter(|r| r.program == program)
5028                .map(|r| r.clone())
5029                .collect()
5030        }
5031    }
5032
    #[async_trait]
    impl CommandRunner for TestCommandRunner {
        /// Records the command's program and args into the shared log and
        /// reports success without executing anything; the returned Output
        /// always has a default exit status and empty stdout/stderr.
        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
            let mut record = self.commands_recorded.lock().expect("poisoned");

            record.push(TestCommand {
                program: command.get_program().display().to_string(),
                args: command
                    .get_args()
                    .map(|a| a.display().to_string())
                    .collect(),
            });

            Ok(Output {
                status: ExitStatus::default(),
                stdout: vec![],
                stderr: vec![],
            })
        }
    }
5053
5054    fn fake_http_client() -> Arc<dyn HttpClient> {
5055        FakeHttpClient::create(|request| async move {
5056            let (parts, _body) = request.into_parts();
5057            if parts.uri.path() == "/token" {
5058                let token_response = TokenResponse {
5059                    token: "token".to_string(),
5060                };
5061                return Ok(http::Response::builder()
5062                    .status(200)
5063                    .body(http_client::AsyncBody::from(
5064                        serde_json_lenient::to_string(&token_response).unwrap(),
5065                    ))
5066                    .unwrap());
5067            }
5068
5069            // OCI specific things
5070            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5071                let response = r#"
5072                    {
5073                        "schemaVersion": 2,
5074                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
5075                        "config": {
5076                            "mediaType": "application/vnd.devcontainers",
5077                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5078                            "size": 2
5079                        },
5080                        "layers": [
5081                            {
5082                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5083                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5084                                "size": 59392,
5085                                "annotations": {
5086                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5087                                }
5088                            }
5089                        ],
5090                        "annotations": {
5091                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5092                            "com.github.package.type": "devcontainer_feature"
5093                        }
5094                    }
5095                    "#;
5096                return Ok(http::Response::builder()
5097                    .status(200)
5098                    .body(http_client::AsyncBody::from(response))
5099                    .unwrap());
5100            }
5101
5102            if parts.uri.path()
5103                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5104            {
5105                let response = build_tarball(vec![
5106                    ("./NOTES.md", r#"
5107                        ## Limitations
5108
5109                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5110                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5111                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5112                          ```
5113                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5114                          ```
5115                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5116
5117
5118                        ## OS Support
5119
5120                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5121
5122                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5123
5124                        `bash` is required to execute the `install.sh` script."#),
5125                    ("./README.md", r#"
5126                        # Docker (Docker-in-Docker) (docker-in-docker)
5127
5128                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5129
5130                        ## Example Usage
5131
5132                        ```json
5133                        "features": {
5134                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5135                        }
5136                        ```
5137
5138                        ## Options
5139
5140                        | Options Id | Description | Type | Default Value |
5141                        |-----|-----|-----|-----|
5142                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5143                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5144                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5145                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5146                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5147                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5148                        | installDockerBuildx | Install Docker Buildx | boolean | true |
5149                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5150                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5151
5152                        ## Customizations
5153
5154                        ### VS Code Extensions
5155
5156                        - `ms-azuretools.vscode-containers`
5157
5158                        ## Limitations
5159
5160                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5161                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5162                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5163                          ```
5164                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5165                          ```
5166                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5167
5168
5169                        ## OS Support
5170
5171                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5172
5173                        `bash` is required to execute the `install.sh` script.
5174
5175
5176                        ---
5177
5178                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
5179                    ("./devcontainer-feature.json", r#"
5180                        {
5181                          "id": "docker-in-docker",
5182                          "version": "2.16.1",
5183                          "name": "Docker (Docker-in-Docker)",
5184                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5185                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5186                          "options": {
5187                            "version": {
5188                              "type": "string",
5189                              "proposals": [
5190                                "latest",
5191                                "none",
5192                                "20.10"
5193                              ],
5194                              "default": "latest",
5195                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5196                            },
5197                            "moby": {
5198                              "type": "boolean",
5199                              "default": true,
5200                              "description": "Install OSS Moby build instead of Docker CE"
5201                            },
5202                            "mobyBuildxVersion": {
5203                              "type": "string",
5204                              "default": "latest",
5205                              "description": "Install a specific version of moby-buildx when using Moby"
5206                            },
5207                            "dockerDashComposeVersion": {
5208                              "type": "string",
5209                              "enum": [
5210                                "none",
5211                                "v1",
5212                                "v2"
5213                              ],
5214                              "default": "v2",
5215                              "description": "Default version of Docker Compose (v1, v2 or none)"
5216                            },
5217                            "azureDnsAutoDetection": {
5218                              "type": "boolean",
5219                              "default": true,
5220                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5221                            },
5222                            "dockerDefaultAddressPool": {
5223                              "type": "string",
5224                              "default": "",
5225                              "proposals": [],
5226                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5227                            },
5228                            "installDockerBuildx": {
5229                              "type": "boolean",
5230                              "default": true,
5231                              "description": "Install Docker Buildx"
5232                            },
5233                            "installDockerComposeSwitch": {
5234                              "type": "boolean",
5235                              "default": false,
5236                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5237                            },
5238                            "disableIp6tables": {
5239                              "type": "boolean",
5240                              "default": false,
5241                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5242                            }
5243                          },
5244                          "entrypoint": "/usr/local/share/docker-init.sh",
5245                          "privileged": true,
5246                          "containerEnv": {
5247                            "DOCKER_BUILDKIT": "1"
5248                          },
5249                          "customizations": {
5250                            "vscode": {
5251                              "extensions": [
5252                                "ms-azuretools.vscode-containers"
5253                              ],
5254                              "settings": {
5255                                "github.copilot.chat.codeGeneration.instructions": [
5256                                  {
5257                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5258                                  }
5259                                ]
5260                              }
5261                            }
5262                          },
5263                          "mounts": [
5264                            {
5265                              "source": "dind-var-lib-docker-${devcontainerId}",
5266                              "target": "/var/lib/docker",
5267                              "type": "volume"
5268                            }
5269                          ],
5270                          "installsAfter": [
5271                            "ghcr.io/devcontainers/features/common-utils"
5272                          ]
5273                        }"#),
5274                    ("./install.sh", r#"
5275                    #!/usr/bin/env bash
5276                    #-------------------------------------------------------------------------------------------------------------
5277                    # Copyright (c) Microsoft Corporation. All rights reserved.
5278                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5279                    #-------------------------------------------------------------------------------------------------------------
5280                    #
5281                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5282                    # Maintainer: The Dev Container spec maintainers
5283
5284
5285                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5286                    USE_MOBY="${MOBY:-"true"}"
5287                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5288                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5289                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5290                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5291                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5292                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5293                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5294                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5295                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5296                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5297                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5298                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5299
5300                    # Default: Exit on any failure.
5301                    set -e
5302
5303                    # Clean up
5304                    rm -rf /var/lib/apt/lists/*
5305
5306                    # Setup STDERR.
5307                    err() {
5308                        echo "(!) $*" >&2
5309                    }
5310
5311                    if [ "$(id -u)" -ne 0 ]; then
5312                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5313                        exit 1
5314                    fi
5315
5316                    ###################
5317                    # Helper Functions
5318                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5319                    ###################
5320
5321                    # Determine the appropriate non-root user
5322                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5323                        USERNAME=""
5324                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5325                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5326                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5327                                USERNAME=${CURRENT_USER}
5328                                break
5329                            fi
5330                        done
5331                        if [ "${USERNAME}" = "" ]; then
5332                            USERNAME=root
5333                        fi
5334                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5335                        USERNAME=root
5336                    fi
5337
5338                    # Package manager update function
5339                    pkg_mgr_update() {
5340                        case ${ADJUSTED_ID} in
5341                            debian)
5342                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5343                                    echo "Running apt-get update..."
5344                                    apt-get update -y
5345                                fi
5346                                ;;
5347                            rhel)
5348                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5349                                    cache_check_dir="/var/cache/yum"
5350                                else
5351                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5352                                fi
5353                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5354                                    echo "Running ${PKG_MGR_CMD} makecache ..."
5355                                    ${PKG_MGR_CMD} makecache
5356                                fi
5357                                ;;
5358                        esac
5359                    }
5360
5361                    # Checks if packages are installed and installs them if not
5362                    check_packages() {
5363                        case ${ADJUSTED_ID} in
5364                            debian)
5365                                if ! dpkg -s "$@" > /dev/null 2>&1; then
5366                                    pkg_mgr_update
5367                                    apt-get -y install --no-install-recommends "$@"
5368                                fi
5369                                ;;
5370                            rhel)
5371                                if ! rpm -q "$@" > /dev/null 2>&1; then
5372                                    pkg_mgr_update
5373                                    ${PKG_MGR_CMD} -y install "$@"
5374                                fi
5375                                ;;
5376                        esac
5377                    }
5378
5379                    # Figure out correct version of a three part version number is not passed
5380                    find_version_from_git_tags() {
5381                        local variable_name=$1
5382                        local requested_version=${!variable_name}
5383                        if [ "${requested_version}" = "none" ]; then return; fi
5384                        local repository=$2
5385                        local prefix=${3:-"tags/v"}
5386                        local separator=${4:-"."}
5387                        local last_part_optional=${5:-"false"}
5388                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5389                            local escaped_separator=${separator//./\\.}
5390                            local last_part
5391                            if [ "${last_part_optional}" = "true" ]; then
5392                                last_part="(${escaped_separator}[0-9]+)?"
5393                            else
5394                                last_part="${escaped_separator}[0-9]+"
5395                            fi
5396                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5397                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5398                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5399                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5400                            else
5401                                set +e
5402                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5403                                set -e
5404                            fi
5405                        fi
5406                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5407                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5408                            exit 1
5409                        fi
5410                        echo "${variable_name}=${!variable_name}"
5411                    }
5412
5413                    # Use semver logic to decrement a version number then look for the closest match
5414                    find_prev_version_from_git_tags() {
5415                        local variable_name=$1
5416                        local current_version=${!variable_name}
5417                        local repository=$2
5418                        # Normally a "v" is used before the version number, but support alternate cases
5419                        local prefix=${3:-"tags/v"}
5420                        # Some repositories use "_" instead of "." for version number part separation, support that
5421                        local separator=${4:-"."}
5422                        # Some tools release versions that omit the last digit (e.g. go)
5423                        local last_part_optional=${5:-"false"}
5424                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5425                        local version_suffix_regex=$6
5426                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5427                        set +e
5428                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5429                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5430                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5431
5432                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5433                                ((major=major-1))
5434                                declare -g ${variable_name}="${major}"
5435                                # Look for latest version from previous major release
5436                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5437                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
5438                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5439                                ((minor=minor-1))
5440                                declare -g ${variable_name}="${major}.${minor}"
5441                                # Look for latest version from previous minor release
5442                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5443                            else
5444                                ((breakfix=breakfix-1))
5445                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5446                                    declare -g ${variable_name}="${major}.${minor}"
5447                                else
5448                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5449                                fi
5450                            fi
5451                        set -e
5452                    }
5453
5454                    # Function to fetch the version released prior to the latest version
5455                    get_previous_version() {
5456                        local url=$1
5457                        local repo_url=$2
5458                        local variable_name=$3
5459                        prev_version=${!variable_name}
5460
5461                        output=$(curl -s "$repo_url");
5462                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
5463                          message=$(echo "$output" | jq -r '.message')
5464
5465                          if [[ $message == "API rate limit exceeded"* ]]; then
5466                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
5467                                echo -e "\nAttempting to find latest version using GitHub tags."
5468                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
5469                                declare -g ${variable_name}="${prev_version}"
5470                           fi
5471                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
5472                            echo -e "\nAttempting to find latest version using GitHub Api."
5473                            version=$(echo "$output" | jq -r '.[1].tag_name')
5474                            declare -g ${variable_name}="${version#v}"
5475                        fi
5476                        echo "${variable_name}=${!variable_name}"
5477                    }
5478
5479                    get_github_api_repo_url() {
5480                        local url=$1
5481                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5482                    }
5483
5484                    ###########################################
5485                    # Start docker-in-docker installation
5486                    ###########################################
5487
5488                    # Ensure apt is in non-interactive to avoid prompts
5489                    export DEBIAN_FRONTEND=noninteractive
5490
5491                    # Source /etc/os-release to get OS info
5492                    . /etc/os-release
5493
5494                    # Determine adjusted ID and package manager
5495                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5496                        ADJUSTED_ID="debian"
5497                        PKG_MGR_CMD="apt-get"
5498                        # Use dpkg for Debian-based systems
5499                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5500                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5501                        ADJUSTED_ID="rhel"
5502                        # Determine the appropriate package manager for RHEL-based systems
5503                        for pkg_mgr in tdnf dnf microdnf yum; do
5504                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
5505                                PKG_MGR_CMD="$pkg_mgr"
5506                                break
5507                            fi
5508                        done
5509
5510                        if [ -z "${PKG_MGR_CMD}" ]; then
5511                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5512                            exit 1
5513                        fi
5514
5515                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5516                    else
5517                        err "Linux distro ${ID} not supported."
5518                        exit 1
5519                    fi
5520
5521                    # Azure Linux specific setup
5522                    if [ "${ID}" = "azurelinux" ]; then
5523                        VERSION_CODENAME="azurelinux${VERSION_ID}"
5524                    fi
5525
5526                    # Prevent attempting to install Moby on Debian trixie (packages removed)
5527                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5528                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5529                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5530                        exit 1
5531                    fi
5532
5533                    # Check if distro is supported
5534                    if [ "${USE_MOBY}" = "true" ]; then
5535                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5536                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5537                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5538                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5539                                exit 1
5540                            fi
5541                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
5542                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5543                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5544                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5545                            else
5546                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5547                            fi
5548                        fi
5549                    else
5550                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5551                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5552                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5553                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5554                                exit 1
5555                            fi
5556                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5557                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5558
5559                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5560                        fi
5561                    fi
5562
5563                    # Install base dependencies
5564                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5565                    case ${ADJUSTED_ID} in
5566                        debian)
5567                            check_packages apt-transport-https $base_packages dirmngr
5568                            ;;
5569                        rhel)
5570                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel
5571
5572                            ;;
5573                    esac
5574
5575                    # Install git if not already present
5576                    if ! command -v git >/dev/null 2>&1; then
5577                        check_packages git
5578                    fi
5579
5580                    # Update CA certificates to ensure HTTPS connections work properly
5581                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5582                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5583                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5584                        update-ca-certificates
5585                    fi
5586
5587                    # Swap to legacy iptables for compatibility (Debian only)
5588                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5589                        update-alternatives --set iptables /usr/sbin/iptables-legacy
5590                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5591                    fi
5592
5593                    # Set up the necessary repositories
5594                    if [ "${USE_MOBY}" = "true" ]; then
5595                        # Name of open source engine/cli
5596                        engine_package_name="moby-engine"
5597                        cli_package_name="moby-cli"
5598
5599                        case ${ADJUSTED_ID} in
5600                            debian)
5601                                # Import key safely and import Microsoft apt repo
5602                                {
5603                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5604                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5605                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5606                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5607                                ;;
5608                            rhel)
5609                                echo "(*) ${ID} detected - checking for Moby packages..."
5610
5611                                # Check if moby packages are available in default repos
5612                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5613                                    echo "(*) Using built-in ${ID} Moby packages"
5614                                else
5615                                    case "${ID}" in
5616                                        azurelinux)
5617                                            echo "(*) Moby packages not found in Azure Linux repositories"
5618                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5619                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5620                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5621                                            exit 1
5622                                            ;;
5623                                        mariner)
5624                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
5625                                            # Add Microsoft repository if packages aren't available locally
5626                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5627                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
5628                    [microsoft]
5629                    name=Microsoft Repository
5630                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5631                    enabled=1
5632                    gpgcheck=1
5633                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5634                    EOF
5635                                    # Verify packages are available after adding repo
5636                                    pkg_mgr_update
5637                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5638                                        echo "(*) Moby packages not found in Microsoft repository either"
5639                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5640                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5641                                        exit 1
5642                                    fi
5643                                    ;;
5644                                *)
5645                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5646                                    exit 1
5647                                    ;;
5648                                esac
5649                            fi
5650                            ;;
5651                        esac
5652                    else
5653                        # Name of licensed engine/cli
5654                        engine_package_name="docker-ce"
5655                        cli_package_name="docker-ce-cli"
5656                        case ${ADJUSTED_ID} in
5657                            debian)
5658                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5659                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5660                                ;;
5661                            rhel)
5662                                # Docker CE repository setup for RHEL-based systems
5663                                setup_docker_ce_repo() {
5664                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5665                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
5666                    [docker-ce-stable]
5667                    name=Docker CE Stable
5668                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5669                    enabled=1
5670                    gpgcheck=1
5671                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5672                    skip_if_unavailable=1
5673                    module_hotfixes=1
5674                    EOF
5675                                }
5676                                install_azure_linux_deps() {
5677                                    echo "(*) Installing device-mapper libraries for Docker CE..."
5678                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5679                                    echo "(*) Installing additional Docker CE dependencies..."
5680                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5681                                        echo "(*) Some optional dependencies could not be installed, continuing..."
5682                                    }
5683                                }
5684                                setup_selinux_context() {
5685                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5686                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
5687                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5688                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5689                                    fi
5690                                }
5691
5692                                # Special handling for RHEL Docker CE installation
5693                                case "${ID}" in
5694                                    azurelinux|mariner)
5695                                        echo "(*) ${ID} detected"
5696                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5697                                        echo "(*) Setting up Docker CE repository..."
5698
5699                                        setup_docker_ce_repo
5700                                        install_azure_linux_deps
5701
5702                                        if [ "${USE_MOBY}" != "true" ]; then
5703                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5704                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5705                                            setup_selinux_context
5706                                        else
5707                                            echo "(*) Using Moby - container-selinux not required"
5708                                        fi
5709                                        ;;
5710                                    *)
5711                                        # Standard RHEL/CentOS/Fedora approach
5712                                        if command -v dnf >/dev/null 2>&1; then
5713                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5714                                        elif command -v yum-config-manager >/dev/null 2>&1; then
5715                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5716                                        else
5717                                            # Manual fallback
5718                                            setup_docker_ce_repo
5719                                fi
5720                                ;;
5721                            esac
5722                            ;;
5723                        esac
5724                    fi
5725
5726                    # Refresh package database
5727                    case ${ADJUSTED_ID} in
5728                        debian)
5729                            apt-get update
5730                            ;;
5731                        rhel)
5732                            pkg_mgr_update
5733                            ;;
5734                    esac
5735
                    # Soft version matching: translate the requested DOCKER_VERSION into
                    # package-manager pin suffixes (engine_version_suffix / cli_version_suffix).
                    # "latest"/"lts"/"stable" mean "no pin" (empty suffix). Otherwise the
                    # requested version is matched against what the repo actually offers:
                    #   - debian: suffix is "=<full apt version>" found via apt-cache madison
                    #   - rhel:   suffix is "-<full rpm version>" found via the package manager
                    # Uses cli_package_name / engine_package_name / PKG_MGR_CMD / err set
                    # earlier in this script.
                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
                        # Empty, meaning grab whatever "latest" is in apt repo
                        engine_version_suffix=""
                        cli_version_suffix=""
                    else
                        case ${ADJUSTED_ID} in
                            debian)
                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
                        # Escape '.' and '+' so the user-supplied version is treated literally
                        # inside the extended regex below.
                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                        set +e # Don't exit if finding version fails - will handle gracefully
                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                        set -e
                        # A bare "=" means the grep above matched nothing — fail with the
                        # list of versions that ARE available.
                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                            exit 1
                        fi
                        ;;
                    rhel)
                        # For RHEL-based systems, use dnf/yum/tdnf to find versions. Unlike the
                        # debian branch, a failed match is not fatal here — it falls back to
                        # the latest available package.
                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
                                set +e # Don't exit if finding version fails - will handle gracefully
                                    if [ "${USE_MOBY}" = "true" ]; then
                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    else
                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    fi
                                set -e
                                if [ -n "${available_versions}" ]; then
                                    # rpm-style pin: both engine and cli share the same version.
                                    engine_version_suffix="-${available_versions}"
                                    cli_version_suffix="-${available_versions}"
                                else
                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                                    engine_version_suffix=""
                                    cli_version_suffix=""
                                fi
                                ;;
                        esac
                    fi
5780
                    # Version matching for moby-buildx (only relevant when installing Moby).
                    # Mirrors the engine/cli matching above: "latest" means no pin; otherwise
                    # resolve a concrete apt ("=ver") or rpm ("-ver") suffix, failing hard on
                    # debian and falling back to latest on rhel.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
                            # Empty, meaning grab whatever "latest" is in apt repo
                            buildx_version_suffix=""
                        else
                            case ${ADJUSTED_ID} in
                                debian)
                            # Escape '.' and '+' so the requested version is matched literally.
                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                            set +e
                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                            set -e
                            # "=" alone means no match — report available versions and abort.
                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                                exit 1
                            fi
                            ;;
                                rhel)
                                    # For RHEL-based systems, try to find buildx version or use latest
                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                                    set +e
                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                                    set -e
                                    if [ -n "${available_buildx}" ]; then
                                        buildx_version_suffix="-${available_buildx}"
                                    else
                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                                        buildx_version_suffix=""
                                    fi
                                    ;;
                            esac
                            # Informational: surfaces the resolved pin in the build log.
                            echo "buildx_version_suffix ${buildx_version_suffix}"
                        fi
                    fi
5818
5819                    # Install Docker / Moby CLI if not already installed
5820                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5821                        echo "Docker / Moby CLI and Engine already installed."
5822                    else
5823                            case ${ADJUSTED_ID} in
5824                            debian)
5825                                if [ "${USE_MOBY}" = "true" ]; then
5826                                    # Install engine
5827                                    set +e # Handle error gracefully
5828                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5829                                        exit_code=$?
5830                                    set -e
5831
5832                                    if [ ${exit_code} -ne 0 ]; then
5833                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5834                                        exit 1
5835                                    fi
5836
5837                                    # Install compose
5838                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5839                                else
5840                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5841                                    # Install compose
5842                                    apt-mark hold docker-ce docker-ce-cli
5843                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5844                                fi
5845                                ;;
5846                            rhel)
5847                                if [ "${USE_MOBY}" = "true" ]; then
5848                                    set +e # Handle error gracefully
5849                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5850                                        exit_code=$?
5851                                    set -e
5852
5853                                    if [ ${exit_code} -ne 0 ]; then
5854                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5855                                        exit 1
5856                                    fi
5857
5858                                    # Install compose
5859                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5860                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5861                                    fi
5862                                else
5863                                                   # Special handling for Azure Linux Docker CE installation
5864                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5865                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5866
5867                                        # Use rpm with --force and --nodeps for Azure Linux
5868                                        set +e  # Don't exit on error for this section
5869                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5870                                        install_result=$?
5871                                        set -e
5872
5873                                        if [ $install_result -ne 0 ]; then
5874                                            echo "(*) Standard installation failed, trying manual installation..."
5875
5876                                            echo "(*) Standard installation failed, trying manual installation..."
5877
5878                                            # Create directory for downloading packages
5879                                            mkdir -p /tmp/docker-ce-install
5880
5881                                            # Download packages manually using curl since tdnf doesn't support download
5882                                            echo "(*) Downloading Docker CE packages manually..."
5883
5884                                            # Get the repository baseurl
5885                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5886
5887                                            # Download packages directly
5888                                            cd /tmp/docker-ce-install
5889
5890                                            # Get package names with versions
5891                                            if [ -n "${cli_version_suffix}" ]; then
5892                                                docker_ce_version="${cli_version_suffix#-}"
5893                                                docker_cli_version="${engine_version_suffix#-}"
5894                                            else
5895                                                # Get latest version from repository
5896                                                docker_ce_version="latest"
5897                                            fi
5898
5899                                            echo "(*) Attempting to download Docker CE packages from repository..."
5900
5901                                            # Try to download latest packages if specific version fails
5902                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5903                                                # Fallback: try to get latest available version
5904                                                echo "(*) Specific version not found, trying latest..."
5905                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5906                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5907                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5908
5909                                                if [ -n "${latest_docker}" ]; then
5910                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5911                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5912                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5913                                                else
5914                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
5915                                                    echo "(*) Please check repository configuration or use 'moby': true"
5916                                                    exit 1
5917                                                fi
5918                                            fi
5919                                            # Install systemd libraries required by Docker CE
5920                                            echo "(*) Installing systemd libraries required by Docker CE..."
5921                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5922                                                echo "(*) WARNING: Could not install systemd libraries"
5923                                                echo "(*) Docker may fail to start without these"
5924                                            }
5925
5926                                            # Install with rpm --force --nodeps
5927                                            echo "(*) Installing Docker CE packages with dependency override..."
5928                                            rpm -Uvh --force --nodeps *.rpm
5929
5930                                            # Cleanup
5931                                            cd /
5932                                            rm -rf /tmp/docker-ce-install
5933
5934                                            echo "(*) Docker CE installation completed with dependency bypass"
5935                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5936                                        fi
5937                                    else
5938                                        # Standard installation for other RHEL-based systems
5939                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5940                                    fi
5941                                    # Install compose
5942                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5943                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5944                                    fi
5945                                fi
5946                                ;;
5947                        esac
5948                    fi
5949
                    echo "Finished installing docker / moby!"

                    # Docker CLI plugin directory; used below when staging the compose and
                    # buildx plugin binaries.
                    docker_home="/usr/libexec/docker"
                    cli_plugins_dir="${docker_home}/cli-plugins"

                    # fallback for docker-compose
                    # Invoked when downloading the resolved docker-compose release fails:
                    # resolves the previous release and downloads that binary instead.
                    # Uses the caller's compose_version / target_compose_arch /
                    # docker_compose_path variables; get_previous_version receives the
                    # variable NAME "compose_version" (presumably updated by reference —
                    # see the helper defined earlier in this script).
                    fallback_compose(){
                        # $1: GitHub project URL (https://github.com/docker/compose)
                        local url=$1
                        local repo_url=$(get_github_api_repo_url "$url")
                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
                        get_previous_version "${url}" "${repo_url}" compose_version
                        echo -e "\nAttempting to install v${compose_version}"
                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
                    }
5964
                    # If 'docker-compose' command is to be included
                    # Installs docker-compose to /usr/local/bin, either the archived v1
                    # binary/pip package or a v2 release binary (also copied into the CLI
                    # plugin dir so `docker compose` works). Unsupported CPU architectures
                    # abort the whole install.
                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                        case "${architecture}" in
                        amd64|x86_64) target_compose_arch=x86_64 ;;
                        arm64|aarch64) target_compose_arch=aarch64 ;;
                        *)
                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
                            exit 1
                        esac

                        docker_compose_path="/usr/local/bin/docker-compose"
                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
                            # v1 predates compose-switch, so disable that step for this run.
                            INSTALL_DOCKER_COMPOSE_SWITCH="false"

                            if [ "${target_compose_arch}" = "x86_64" ]; then
                                echo "(*) Installing docker compose v1..."
                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
                                chmod +x ${docker_compose_path}

                                # Download the SHA256 checksum
                                # NOTE(review): the .sha256sum file is written into the current
                                # working directory, not /tmp — confirm that is intentional.
                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
                                sha256sum -c docker-compose.sha256sum --ignore-missing
                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
                                exit 1
                            else
                                # Use pip to get a version that runs on this architecture
                                check_packages python3-minimal python3-pip libffi-dev python3-venv
                                echo "(*) Installing docker compose v1 via pip..."
                                export PYTHONUSERBASE=/usr/local
                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
                            fi
                        else
                            # v2: resolve the requested tag (strip a leading "v"), download the
                            # release binary, and fall back to the previous release on failure.
                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
                            docker_compose_url="https://github.com/docker/compose"
                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
                            echo "(*) Installing docker-compose ${compose_version}..."
                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
                                     fallback_compose "$docker_compose_url"
                            }

                            chmod +x ${docker_compose_path}

                            # Download the SHA256 checksum
                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
                            sha256sum -c docker-compose.sha256sum --ignore-missing

                            # Also expose the binary as the `docker compose` CLI plugin.
                            mkdir -p ${cli_plugins_dir}
                            cp ${docker_compose_path} ${cli_plugins_dir}
                        fi
                    fi
6020
                    # fallback method for compose-switch
                    # Resolves the previous compose-switch release (via get_previous_version,
                    # defined earlier) and downloads that binary to /usr/local/bin.
                    # NOTE: the function name contains '-', which bash accepts but POSIX sh
                    # does not — this script is bash-only.
                    # NOTE(review): interpolates ${target_switch_arch} into the URL, so the
                    # caller must set it before invoking this function.
                    fallback_compose-switch() {
                        # $1: GitHub project URL (https://github.com/docker/compose-switch)
                        local url=$1
                        local repo_url=$(get_github_api_repo_url "$url")
                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
                        get_previous_version "$url" "$repo_url" compose_switch_version
                        echo -e "\nAttempting to install v${compose_switch_version}"
                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
                    }
6030                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
6031                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
6032                        if type docker-compose > /dev/null 2>&1; then
6033                            echo "(*) Installing compose-switch..."
6034                            current_compose_path="$(command -v docker-compose)"
6035                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
6036                            compose_switch_version="latest"
6037                            compose_switch_url="https://github.com/docker/compose-switch"
6038                            # Try to get latest version, fallback to known stable version if GitHub API fails
6039                            set +e
6040                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
6041                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
6042                                echo "(*) GitHub API rate limited or failed, using fallback method"
6043                                fallback_compose-switch "$compose_switch_url"
6044                            fi
6045                            set -e
6046
6047                            # Map architecture for compose-switch downloads
6048                            case "${architecture}" in
6049                                amd64|x86_64) target_switch_arch=amd64 ;;
6050                                arm64|aarch64) target_switch_arch=arm64 ;;
6051                                *) target_switch_arch=${architecture} ;;
6052                            esac
6053                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
6054                            chmod +x /usr/local/bin/compose-switch
6055                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
6056                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
6057                            mv "${current_compose_path}" "${target_compose_path}"
6058                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
6059                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
6060                        else
6061                            err "Skipping installation of compose-switch as docker compose is unavailable..."
6062                        fi
6063                    fi
6064
6065                    # If init file already exists, exit
6066                    if [ -f "/usr/local/share/docker-init.sh" ]; then
6067                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
6068                        # Clean up
6069                        rm -rf /var/lib/apt/lists/*
6070                        exit 0
6071                    fi
6072                    echo "docker-init doesn't exist, adding..."
6073
6074                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
6075                            groupadd -r docker
6076                    fi
6077
6078                    usermod -aG docker ${USERNAME}
6079
                    # fallback for docker/buildx
                    # Invoked when downloading the resolved buildx release fails: resolves
                    # the previous release (via get_previous_version, defined earlier),
                    # recomputes the artifact name, and downloads it into the current
                    # directory with wget.
                    # NOTE(review): uses the caller's ${target_buildx_arch} and updates the
                    # caller-visible buildx_version / buildx_file_name variables.
                    fallback_buildx() {
                        # $1: GitHub project URL (https://github.com/docker/buildx)
                        local url=$1
                        local repo_url=$(get_github_api_repo_url "$url")
                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
                        get_previous_version "$url" "$repo_url" buildx_version
                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
                        echo -e "\nAttempting to install v${buildx_version}"
                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
                    }
6090
                    # Install the buildx CLI plugin: resolve the latest release tag, download
                    # the binary (with fallback to the previous release), stage it in the CLI
                    # plugin directory, and grant the docker group access to the plugin tree.
                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
                        buildx_version="latest"
                        docker_buildx_url="https://github.com/docker/buildx"
                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
                        echo "(*) Installing buildx ${buildx_version}..."

                        # Map architecture for buildx downloads
                        case "${architecture}" in
                            amd64|x86_64) target_buildx_arch=amd64 ;;
                            arm64|aarch64) target_buildx_arch=arm64 ;;
                            *) target_buildx_arch=${architecture} ;;
                        esac

                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

                        cd /tmp
                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

                        # Re-declared here so this section works even if the earlier
                        # assignments were skipped.
                        docker_home="/usr/libexec/docker"
                        cli_plugins_dir="${docker_home}/cli-plugins"

                        mkdir -p ${cli_plugins_dir}
                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
                        chmod +x ${cli_plugins_dir}/docker-buildx

                        # Let members of the docker group read/write the plugin tree; setgid
                        # on directories keeps group ownership for files created later.
                        chown -R "${USERNAME}:docker" "${docker_home}"
                        chmod -R g+r+w "${docker_home}"
                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
                    fi
6120
6121                    DOCKER_DEFAULT_IP6_TABLES=""
6122                    if [ "$DISABLE_IP6_TABLES" == true ]; then
6123                        requested_version=""
6124                        # checking whether the version requested either is in semver format or just a number denoting the major version
6125                        # and, extracting the major version number out of the two scenarios
6126                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
6127                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
6128                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
6129                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
6130                            requested_version=$DOCKER_VERSION
6131                        fi
6132                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
6133                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
6134                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
6135                        fi
6136                    fi
6137
6138                    if [ ! -d /usr/local/share ]; then
6139                        mkdir -p /usr/local/share
6140                    fi
6141
6142                    tee /usr/local/share/docker-init.sh > /dev/null \
6143                    << EOF
6144                    #!/bin/sh
6145                    #-------------------------------------------------------------------------------------------------------------
6146                    # Copyright (c) Microsoft Corporation. All rights reserved.
6147                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6148                    #-------------------------------------------------------------------------------------------------------------
6149
6150                    set -e
6151
6152                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
6153                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
6154                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
6155                    EOF
6156
6157                    tee -a /usr/local/share/docker-init.sh > /dev/null \
6158                    << 'EOF'
6159                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6160                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6161                        find /run /var/run -iname 'docker*.pid' -delete || :
6162                        find /run /var/run -iname 'container*.pid' -delete || :
6163
6164                        # -- Start: dind wrapper script --
6165                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6166
6167                        export container=docker
6168
6169                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6170                            mount -t securityfs none /sys/kernel/security || {
6171                                echo >&2 'Could not mount /sys/kernel/security.'
6172                                echo >&2 'AppArmor detection and --privileged mode might break.'
6173                            }
6174                        fi
6175
6176                        # Mount /tmp (conditionally)
6177                        if ! mountpoint -q /tmp; then
6178                            mount -t tmpfs none /tmp
6179                        fi
6180
6181                        set_cgroup_nesting()
6182                        {
6183                            # cgroup v2: enable nesting
6184                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6185                                # move the processes from the root group to the /init group,
6186                                # otherwise writing subtree_control fails with EBUSY.
6187                                # An error during moving non-existent process (i.e., "cat") is ignored.
6188                                mkdir -p /sys/fs/cgroup/init
6189                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6190                                # enable controllers
6191                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6192                                    > /sys/fs/cgroup/cgroup.subtree_control
6193                            fi
6194                        }
6195
6196                        # Set cgroup nesting, retrying if necessary
6197                        retry_cgroup_nesting=0
6198
6199                        until [ "${retry_cgroup_nesting}" -eq "5" ];
6200                        do
6201                            set +e
6202                                set_cgroup_nesting
6203
6204                                if [ $? -ne 0 ]; then
6205                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6206                                else
6207                                    break
6208                                fi
6209
6210                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6211                            set -e
6212                        done
6213
6214                        # -- End: dind wrapper script --
6215
6216                        # Handle DNS
6217                        set +e
6218                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6219                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6220                            then
6221                                echo "Setting dockerd Azure DNS."
6222                                CUSTOMDNS="--dns 168.63.129.16"
6223                            else
6224                                echo "Not setting dockerd DNS manually."
6225                                CUSTOMDNS=""
6226                            fi
6227                        set -e
6228
6229                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6230                        then
6231                            DEFAULT_ADDRESS_POOL=""
6232                        else
6233                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6234                        fi
6235
6236                        # Start docker/moby engine
6237                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6238                    INNEREOF
6239                    )"
6240
6241                    sudo_if() {
6242                        COMMAND="$*"
6243
6244                        if [ "$(id -u)" -ne 0 ]; then
6245                            sudo $COMMAND
6246                        else
6247                            $COMMAND
6248                        fi
6249                    }
6250
6251                    retry_docker_start_count=0
6252                    docker_ok="false"
6253
6254                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
6255                    do
6256                        # Start using sudo if not invoked as root
6257                        if [ "$(id -u)" -ne 0 ]; then
6258                            sudo /bin/sh -c "${dockerd_start}"
6259                        else
6260                            eval "${dockerd_start}"
6261                        fi
6262
6263                        retry_count=0
6264                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
6265                        do
6266                            sleep 1s
6267                            set +e
6268                                docker info > /dev/null 2>&1 && docker_ok="true"
6269                            set -e
6270
6271                            retry_count=`expr $retry_count + 1`
6272                        done
6273
6274                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6275                            echo "(*) Failed to start docker, retrying..."
6276                            set +e
6277                                sudo_if pkill dockerd
6278                                sudo_if pkill containerd
6279                            set -e
6280                        fi
6281
6282                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
6283                    done
6284
6285                    # Execute whatever commands were passed in (if any). This allows us
6286                    # to set this script to ENTRYPOINT while still executing the default CMD.
6287                    exec "$@"
6288                    EOF
6289
6290                    chmod +x /usr/local/share/docker-init.sh
6291                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
6292
6293                    # Clean up
6294                    rm -rf /var/lib/apt/lists/*
6295
6296                    echo 'docker-in-docker-debian script has completed!'"#),
6297                ]).await;
6298
6299                return Ok(http::Response::builder()
6300                    .status(200)
6301                    .body(AsyncBody::from(response))
6302                    .unwrap());
6303            }
6304            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6305                let response = r#"
6306                    {
6307                        "schemaVersion": 2,
6308                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6309                        "config": {
6310                            "mediaType": "application/vnd.devcontainers",
6311                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6312                            "size": 2
6313                        },
6314                        "layers": [
6315                            {
6316                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6317                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6318                                "size": 20992,
6319                                "annotations": {
6320                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6321                                }
6322                            }
6323                        ],
6324                        "annotations": {
6325                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6326                            "com.github.package.type": "devcontainer_feature"
6327                        }
6328                    }
6329                    "#;
6330
6331                return Ok(http::Response::builder()
6332                    .status(200)
6333                    .body(http_client::AsyncBody::from(response))
6334                    .unwrap());
6335            }
6336            if parts.uri.path()
6337                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6338            {
6339                let response = build_tarball(vec![
6340                    ("./devcontainer-feature.json", r#"
6341                        {
6342                            "id": "go",
6343                            "version": "1.3.3",
6344                            "name": "Go",
6345                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6346                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6347                            "options": {
6348                                "version": {
6349                                    "type": "string",
6350                                    "proposals": [
6351                                        "latest",
6352                                        "none",
6353                                        "1.24",
6354                                        "1.23"
6355                                    ],
6356                                    "default": "latest",
6357                                    "description": "Select or enter a Go version to install"
6358                                },
6359                                "golangciLintVersion": {
6360                                    "type": "string",
6361                                    "default": "latest",
6362                                    "description": "Version of golangci-lint to install"
6363                                }
6364                            },
6365                            "init": true,
6366                            "customizations": {
6367                                "vscode": {
6368                                    "extensions": [
6369                                        "golang.Go"
6370                                    ],
6371                                    "settings": {
6372                                        "github.copilot.chat.codeGeneration.instructions": [
6373                                            {
6374                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6375                                            }
6376                                        ]
6377                                    }
6378                                }
6379                            },
6380                            "containerEnv": {
6381                                "GOROOT": "/usr/local/go",
6382                                "GOPATH": "/go",
6383                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6384                            },
6385                            "capAdd": [
6386                                "SYS_PTRACE"
6387                            ],
6388                            "securityOpt": [
6389                                "seccomp=unconfined"
6390                            ],
6391                            "installsAfter": [
6392                                "ghcr.io/devcontainers/features/common-utils"
6393                            ]
6394                        }
6395                        "#),
6396                    ("./install.sh", r#"
6397                    #!/usr/bin/env bash
6398                    #-------------------------------------------------------------------------------------------------------------
6399                    # Copyright (c) Microsoft Corporation. All rights reserved.
6400                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6401                    #-------------------------------------------------------------------------------------------------------------
6402                    #
6403                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6404                    # Maintainer: The VS Code and Codespaces Teams
6405
6406                    TARGET_GO_VERSION="${VERSION:-"latest"}"
6407                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6408
6409                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6410                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6411                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6412                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6413
6414                    # https://www.google.com/linuxrepositories/
6415                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6416
6417                    set -e
6418
6419                    if [ "$(id -u)" -ne 0 ]; then
6420                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6421                        exit 1
6422                    fi
6423
6424                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6425                    . /etc/os-release
6426                    # Get an adjusted ID independent of distro variants
6427                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6428                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6429                        ADJUSTED_ID="debian"
6430                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6431                        ADJUSTED_ID="rhel"
6432                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6433                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6434                        else
6435                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6436                        fi
6437                    else
6438                        echo "Linux distro ${ID} not supported."
6439                        exit 1
6440                    fi
6441
6442                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6443                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6444                        # Update the repo files to reference vault.centos.org.
6445                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6446                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6447                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6448                    fi
6449
6450                    # Setup INSTALL_CMD & PKG_MGR_CMD
6451                    if type apt-get > /dev/null 2>&1; then
6452                        PKG_MGR_CMD=apt-get
6453                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6454                    elif type microdnf > /dev/null 2>&1; then
6455                        PKG_MGR_CMD=microdnf
6456                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6457                    elif type dnf > /dev/null 2>&1; then
6458                        PKG_MGR_CMD=dnf
6459                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6460                    else
6461                        PKG_MGR_CMD=yum
6462                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6463                    fi
6464
6465                    # Clean up
6466                    clean_up() {
6467                        case ${ADJUSTED_ID} in
6468                            debian)
6469                                rm -rf /var/lib/apt/lists/*
6470                                ;;
6471                            rhel)
6472                                rm -rf /var/cache/dnf/* /var/cache/yum/*
6473                                rm -rf /tmp/yum.log
6474                                rm -rf ${GPG_INSTALL_PATH}
6475                                ;;
6476                        esac
6477                    }
6478                    clean_up
6479
6480
6481                    # Figure out correct version of a three part version number is not passed
6482                    find_version_from_git_tags() {
6483                        local variable_name=$1
6484                        local requested_version=${!variable_name}
6485                        if [ "${requested_version}" = "none" ]; then return; fi
6486                        local repository=$2
6487                        local prefix=${3:-"tags/v"}
6488                        local separator=${4:-"."}
6489                        local last_part_optional=${5:-"false"}
6490                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6491                            local escaped_separator=${separator//./\\.}
6492                            local last_part
6493                            if [ "${last_part_optional}" = "true" ]; then
6494                                last_part="(${escaped_separator}[0-9]+)?"
6495                            else
6496                                last_part="${escaped_separator}[0-9]+"
6497                            fi
6498                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6499                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6500                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6501                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6502                            else
6503                                set +e
6504                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6505                                set -e
6506                            fi
6507                        fi
6508                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6509                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6510                            exit 1
6511                        fi
6512                        echo "${variable_name}=${!variable_name}"
6513                    }
6514
6515                    pkg_mgr_update() {
6516                        case $ADJUSTED_ID in
6517                            debian)
6518                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6519                                    echo "Running apt-get update..."
6520                                    ${PKG_MGR_CMD} update -y
6521                                fi
6522                                ;;
6523                            rhel)
6524                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6525                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6526                                        echo "Running ${PKG_MGR_CMD} makecache ..."
6527                                        ${PKG_MGR_CMD} makecache
6528                                    fi
6529                                else
6530                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6531                                        echo "Running ${PKG_MGR_CMD} check-update ..."
6532                                        set +e
6533                                        ${PKG_MGR_CMD} check-update
6534                                        rc=$?
6535                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
6536                                            exit 1
6537                                        fi
6538                                        set -e
6539                                    fi
6540                                fi
6541                                ;;
6542                        esac
6543                    }
6544
6545                    # Checks if packages are installed and installs them if not
6546                    check_packages() {
6547                        case ${ADJUSTED_ID} in
6548                            debian)
6549                                if ! dpkg -s "$@" > /dev/null 2>&1; then
6550                                    pkg_mgr_update
6551                                    ${INSTALL_CMD} "$@"
6552                                fi
6553                                ;;
6554                            rhel)
6555                                if ! rpm -q "$@" > /dev/null 2>&1; then
6556                                    pkg_mgr_update
6557                                    ${INSTALL_CMD} "$@"
6558                                fi
6559                                ;;
6560                        esac
6561                    }
6562
6563                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6564                    rm -f /etc/profile.d/00-restore-env.sh
6565                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6566                    chmod +x /etc/profile.d/00-restore-env.sh
6567
6568                    # Some distributions do not install awk by default (e.g. Mariner)
6569                    if ! type awk >/dev/null 2>&1; then
6570                        check_packages awk
6571                    fi
6572
6573                    # Determine the appropriate non-root user
6574                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6575                        USERNAME=""
6576                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6577                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6578                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6579                                USERNAME=${CURRENT_USER}
6580                                break
6581                            fi
6582                        done
6583                        if [ "${USERNAME}" = "" ]; then
6584                            USERNAME=root
6585                        fi
6586                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6587                        USERNAME=root
6588                    fi
6589
6590                    export DEBIAN_FRONTEND=noninteractive
6591
6592                    check_packages ca-certificates gnupg2 tar gcc make pkg-config
6593
6594                    if [ $ADJUSTED_ID = "debian" ]; then
6595                        check_packages g++ libc6-dev
6596                    else
6597                        check_packages gcc-c++ glibc-devel
6598                    fi
6599                    # Install curl, git, other dependencies if missing
6600                    if ! type curl > /dev/null 2>&1; then
6601                        check_packages curl
6602                    fi
6603                    if ! type git > /dev/null 2>&1; then
6604                        check_packages git
6605                    fi
6606                    # Some systems, e.g. Mariner, still a few more packages
6607                    if ! type as > /dev/null 2>&1; then
6608                        check_packages binutils
6609                    fi
6610                    if ! [ -f /usr/include/linux/errno.h ]; then
6611                        check_packages kernel-headers
6612                    fi
6613                    # Minimal RHEL install may need findutils installed
6614                    if ! [ -f /usr/bin/find ]; then
6615                        check_packages findutils
6616                    fi
6617
6618                    # Get closest match for version number specified
6619                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6620
6621                    architecture="$(uname -m)"
6622                    case $architecture in
6623                        x86_64) architecture="amd64";;
6624                        aarch64 | armv8*) architecture="arm64";;
6625                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
6626                        i?86) architecture="386";;
6627                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6628                    esac
6629
6630                    # Install Go
6631                    umask 0002
6632                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6633                        groupadd -r golang
6634                    fi
6635                    usermod -a -G golang "${USERNAME}"
6636                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6637
6638                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6639                        # Use a temporary location for gpg keys to avoid polluting image
6640                        export GNUPGHOME="/tmp/tmp-gnupg"
6641                        mkdir -p ${GNUPGHOME}
6642                        chmod 700 ${GNUPGHOME}
6643                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6644                        gpg -q --import /tmp/tmp-gnupg/golang_key
6645                        echo "Downloading Go ${TARGET_GO_VERSION}..."
6646                        set +e
6647                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6648                        exit_code=$?
6649                        set -e
6650                        if [ "$exit_code" != "0" ]; then
6651                            echo "(!) Download failed."
6652                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6653                            set +e
6654                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6655                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6656                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6657                            # Handle Go's odd version pattern where "0" releases omit the last part
6658                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6659                                ((minor=minor-1))
6660                                TARGET_GO_VERSION="${major}.${minor}"
6661                                # Look for latest version from previous minor release
6662                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6663                            else
6664                                ((breakfix=breakfix-1))
6665                                if [ "${breakfix}" = "0" ]; then
6666                                    TARGET_GO_VERSION="${major}.${minor}"
6667                                else
6668                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6669                                fi
6670                            fi
6671                            set -e
6672                            echo "Trying ${TARGET_GO_VERSION}..."
6673                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6674                        fi
6675                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6676                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6677                        echo "Extracting Go ${TARGET_GO_VERSION}..."
6678                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6679                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6680                    else
6681                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6682                    fi
6683
6684                    # Install Go tools that are isImportant && !replacedByGopls based on
6685                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6686                    GO_TOOLS="\
6687                        golang.org/x/tools/gopls@latest \
6688                        honnef.co/go/tools/cmd/staticcheck@latest \
6689                        golang.org/x/lint/golint@latest \
6690                        github.com/mgechev/revive@latest \
6691                        github.com/go-delve/delve/cmd/dlv@latest \
6692                        github.com/fatih/gomodifytags@latest \
6693                        github.com/haya14busa/goplay/cmd/goplay@latest \
6694                        github.com/cweill/gotests/gotests@latest \
6695                        github.com/josharian/impl@latest"
6696
6697                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6698                        echo "Installing common Go tools..."
6699                        export PATH=${TARGET_GOROOT}/bin:${PATH}
6700                        export GOPATH=/tmp/gotools
6701                        export GOCACHE="${GOPATH}/cache"
6702
6703                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6704                        cd "${GOPATH}"
6705
6706                        # Use go get for versions of go under 1.16
6707                        go_install_command=install
6708                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6709                            export GO111MODULE=on
6710                            go_install_command=get
6711                            echo "Go version < 1.16, using go get."
6712                        fi
6713
6714                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6715
6716                        # Move Go tools into path
6717                        if [ -d "${GOPATH}/bin" ]; then
6718                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6719                        fi
6720
6721                        # Install golangci-lint from precompiled binaries
6722                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6723                            echo "Installing golangci-lint latest..."
6724                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6725                                sh -s -- -b "${TARGET_GOPATH}/bin"
6726                        else
6727                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6728                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6729                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6730                        fi
6731
6732                        # Remove Go tools temp directory
6733                        rm -rf "${GOPATH}"
6734                    fi
6735
6736
6737                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6738                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6739                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6740                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6741
6742                    # Clean up
6743                    clean_up
6744
6745                    echo "Done!"
6746                        "#),
6747                ])
6748                .await;
6749                return Ok(http::Response::builder()
6750                    .status(200)
6751                    .body(AsyncBody::from(response))
6752                    .unwrap());
6753            }
            // Mock OCI registry route: serve the manifest for the `aws-cli`
            // dev container feature at tag "1". The body is a static OCI image
            // manifest (schemaVersion 2) listing a single tar layer; the layer's
            // digest matches the blob route mocked later in this handler, and the
            // `dev.containers.metadata` annotation embeds the feature's serialized
            // devcontainer-feature.json.
            // NOTE(review): keep the fixture bytes exactly as-is — the layer
            // digest/size pair is what the blob request below is keyed on.
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                                "size": 19968,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }"#;
                // Respond 200 with the raw manifest JSON.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // Mock OCI registry route: serve the tar layer blob for the `aws-cli`
            // feature. The requested digest matches the single layer listed in the
            // manifest fixture served elsewhere in this handler. The response body
            // is a tarball assembled on the fly from (path, contents) pairs:
            // the feature's devcontainer-feature.json, its install.sh, and
            // vendored shell-completion scripts. All string contents below are
            // fixture bytes — do not edit them.
            if parts.uri.path()
                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
            {
                let response = build_tarball(vec![
                    // Feature metadata: id/version/options/customizations.
                    (
                        "./devcontainer-feature.json",
                        r#"
{
    "id": "aws-cli",
    "version": "1.1.3",
    "name": "AWS CLI",
    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
    "options": {
        "version": {
            "type": "string",
            "proposals": [
                "latest"
            ],
            "default": "latest",
            "description": "Select or enter an AWS CLI version."
        },
        "verbose": {
            "type": "boolean",
            "default": true,
            "description": "Suppress verbose output."
        }
    },
    "customizations": {
        "vscode": {
            "extensions": [
                "AmazonWebServices.aws-toolkit-vscode"
            ],
            "settings": {
                "github.copilot.chat.codeGeneration.instructions": [
                    {
                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
                    }
                ]
            }
        }
    },
    "installsAfter": [
        "ghcr.io/devcontainers/features/common-utils"
    ]
}
                    "#,
                    ),
                    // The feature's install script, as shipped in the layer.
                    (
                        "./install.sh",
                        r#"#!/usr/bin/env bash
                    #-------------------------------------------------------------------------------------------------------------
                    # Copyright (c) Microsoft Corporation. All rights reserved.
                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
                    #-------------------------------------------------------------------------------------------------------------
                    #
                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
                    # Maintainer: The VS Code and Codespaces Teams

                    set -e

                    # Clean up
                    rm -rf /var/lib/apt/lists/*

                    VERSION=${VERSION:-"latest"}
                    VERBOSE=${VERBOSE:-"true"}

                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----

                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
                    YLZATHZKTJyiqA==
                    =vYOk
                    -----END PGP PUBLIC KEY BLOCK-----"

                    if [ "$(id -u)" -ne 0 ]; then
                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
                        exit 1
                    fi

                    apt_get_update()
                    {
                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                            echo "Running apt-get update..."
                            apt-get update -y
                        fi
                    }

                    # Checks if packages are installed and installs them if not
                    check_packages() {
                        if ! dpkg -s "$@" > /dev/null 2>&1; then
                            apt_get_update
                            apt-get -y install --no-install-recommends "$@"
                        fi
                    }

                    export DEBIAN_FRONTEND=noninteractive

                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less

                    verify_aws_cli_gpg_signature() {
                        local filePath=$1
                        local sigFilePath=$2
                        local awsGpgKeyring=aws-cli-public-key.gpg

                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
                        local status=$?

                        rm "./${awsGpgKeyring}"

                        return ${status}
                    }

                    install() {
                        local scriptZipFile=awscli.zip
                        local scriptSigFile=awscli.sig

                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
                        if [ "${VERSION}" != "latest" ]; then
                            local versionStr=-${VERSION}
                        fi
                        architecture=$(dpkg --print-architecture)
                        case "${architecture}" in
                            amd64) architectureStr=x86_64 ;;
                            arm64) architectureStr=aarch64 ;;
                            *)
                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
                                exit 1
                        esac
                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
                        curl "${scriptUrl}" -o "${scriptZipFile}"
                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"

                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
                        if (( $? > 0 )); then
                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
                            exit 1
                        fi

                        if [ "${VERBOSE}" = "false" ]; then
                            unzip -q "${scriptZipFile}"
                        else
                            unzip "${scriptZipFile}"
                        fi

                        ./aws/install

                        # kubectl bash completion
                        mkdir -p /etc/bash_completion.d
                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws

                        # kubectl zsh completion
                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
                        fi

                        rm -rf ./aws
                    }

                    echo "(*) Installing AWS CLI..."

                    install

                    # Clean up
                    rm -rf /var/lib/apt/lists/*

                    echo "Done!""#,
                    ),
                    // Directory entries use an empty-string payload.
                    ("./scripts/", r#""#),
                    (
                        "./scripts/fetch-latest-completer-scripts.sh",
                        r#"
                        #!/bin/bash
                        #-------------------------------------------------------------------------------------------------------------
                        # Copyright (c) Microsoft Corporation. All rights reserved.
                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
                        #-------------------------------------------------------------------------------------------------------------
                        #
                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
                        # Maintainer: The Dev Container spec maintainers
                        #
                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
                        #
                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"

                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
                        chmod +x "$BASH_COMPLETER_SCRIPT"

                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
                        chmod +x "$ZSH_COMPLETER_SCRIPT"
                        "#,
                    ),
                    ("./scripts/vendor/", r#""#),
                    (
                        "./scripts/vendor/aws_bash_completer",
                        r#"
                        # Typically that would be added under one of the following paths:
                        # - /etc/bash_completion.d
                        # - /usr/local/etc/bash_completion.d
                        # - /usr/share/bash-completion/completions

                        complete -C aws_completer aws
                        "#,
                    ),
                    (
                        "./scripts/vendor/aws_zsh_completer.sh",
                        r#"
                        # Source this file to activate auto completion for zsh using the bash
                        # compatibility helper.  Make sure to run `compinit` before, which should be
                        # given usually.
                        #
                        # % source /path/to/zsh_complete.sh
                        #
                        # Typically that would be called somewhere in your .zshrc.
                        #
                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
                        #
                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
                        #
                        # zsh releases prior to that version do not export the required env variables!

                        autoload -Uz bashcompinit
                        bashcompinit -i

                        _bash_complete() {
                          local ret=1
                          local -a suf matches
                          local -x COMP_POINT COMP_CWORD
                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
                          local -x COMP_LINE="$words"
                          local -A savejobstates savejobtexts

                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
                          (( COMP_CWORD = CURRENT - 1))
                          COMP_WORDS=( $words )
                          BASH_VERSINFO=( 2 05b 0 1 release )

                          savejobstates=( ${(kv)jobstates} )
                          savejobtexts=( ${(kv)jobtexts} )

                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )

                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )

                          if [[ -n $matches ]]; then
                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
                              compset -P '*/' && matches=( ${matches##*/} )
                              compset -S '/*' && matches=( ${matches%%/*} )
                              compadd -Q -f "${suf[@]}" -a matches && ret=0
                            else
                              compadd -Q "${suf[@]}" -a matches && ret=0
                            fi
                          fi

                          if (( ret )); then
                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
                              _default "${suf[@]}" && ret=0
                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
                              _directories "${suf[@]}" && ret=0
                            fi
                          fi

                          return ret
                        }

                        complete -C aws_completer aws
                        "#,
                    ),
                ]).await;

                // Respond 200 with the assembled tarball bytes.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
7088
            // Fallback: any request path not matched by a mock route above gets
            // an empty 404 response.
            Ok(http::Response::builder()
                .status(404)
                .body(http_client::AsyncBody::default())
                .unwrap())
7094    }
7095}