// devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use fs::Fs;
  10use http_client::HttpClient;
  11use util::{ResultExt, command::Command};
  12
  13use crate::{
  14    DevContainerConfig, DevContainerContext,
  15    command_json::{CommandRunner, DefaultCommandRunner},
  16    devcontainer_api::{DevContainerError, DevContainerUp},
  17    devcontainer_json::{
  18        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
  19        deserialize_devcontainer_json,
  20    },
  21    docker::{
  22        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  23        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  24        get_remote_dir_from_config,
  25    },
  26    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  27    get_oci_token,
  28    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  29    safe_id_lower,
  30};
  31
/// Parse state of the devcontainer configuration.
enum ConfigStatus {
    /// Raw JSON deserialized as-is; substitution variables not yet expanded.
    Deserialized(DevContainer),
    /// Config re-parsed after non-remote variable expansion
    /// (see `parse_nonremote_vars`).
    VariableParsed(DevContainer),
}
  36
/// The docker compose files in play plus the resolved compose configuration
/// produced from them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Full paths of the compose files passed to docker compose.
    files: Vec<PathBuf>,
    // Resolved compose configuration for those files.
    config: DockerComposeConfig,
}
  42
/// Orchestrates building and running a dev container from a parsed
/// devcontainer.json: expands variables, downloads OCI features, generates
/// the extended Dockerfile, and assembles the resources needed to run.
struct DevContainerManifest {
    // Used to fetch OCI feature manifests/tokens/tarballs.
    http_client: Arc<dyn HttpClient>,
    // Reads config/feature files and writes generated build inputs.
    fs: Arc<dyn Fs>,
    // Inspects images and resolves docker compose configuration.
    docker_client: Arc<dyn DockerClient>,
    // NOTE(review): not exercised in this portion of the file — presumably
    // runs docker/compose commands elsewhere; confirm.
    command_runner: Arc<dyn CommandRunner>,
    // Original devcontainer.json text, before variable substitution.
    raw_config: String,
    // Current parse state of the config (see `ConfigStatus`).
    config: ConfigStatus,
    // Host environment, used for `${localEnv:*}` substitution.
    local_environment: HashMap<String, String>,
    // Project root on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the devcontainer config within `config_directory`.
    file_name: String,
    // Inspected base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths and tag for the features build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
// Default parent directory for the project inside the container.
// NOTE(review): not referenced in this portion of the file — confirm usage.
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
  59impl DevContainerManifest {
  60    async fn new(
  61        context: &DevContainerContext,
  62        environment: HashMap<String, String>,
  63        docker_client: Arc<dyn DockerClient>,
  64        command_runner: Arc<dyn CommandRunner>,
  65        local_config: DevContainerConfig,
  66        local_project_path: &Path,
  67    ) -> Result<Self, DevContainerError> {
  68        let config_path = local_project_path.join(local_config.config_path.clone());
  69        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  70        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  71            log::error!("Unable to read devcontainer contents: {e}");
  72            DevContainerError::DevContainerParseFailed
  73        })?;
  74
  75        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  76
  77        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  78            log::error!("Dev container file should be in a directory");
  79            DevContainerError::NotInValidProject
  80        })?;
  81        let file_name = config_path
  82            .file_name()
  83            .and_then(|f| f.to_str())
  84            .ok_or_else(|| {
  85                log::error!("Dev container file has no file name, or is invalid unicode");
  86                DevContainerError::DevContainerParseFailed
  87            })?;
  88
  89        Ok(Self {
  90            fs: context.fs.clone(),
  91            http_client: context.http_client.clone(),
  92            docker_client,
  93            command_runner,
  94            raw_config: devcontainer_contents,
  95            config: ConfigStatus::Deserialized(devcontainer),
  96            local_project_directory: local_project_path.to_path_buf(),
  97            local_environment: environment,
  98            config_directory: devcontainer_directory.to_path_buf(),
  99            file_name: file_name.to_string(),
 100            root_image: None,
 101            features_build_info: None,
 102            features: Vec::new(),
 103        })
 104    }
 105
 106    fn devcontainer_id(&self) -> String {
 107        let mut labels = self.identifying_labels();
 108        labels.sort_by_key(|(key, _)| *key);
 109
 110        let mut hasher = DefaultHasher::new();
 111        for (key, value) in &labels {
 112            key.hash(&mut hasher);
 113            value.hash(&mut hasher);
 114        }
 115
 116        format!("{:016x}", hasher.finish())
 117    }
 118
 119    fn identifying_labels(&self) -> Vec<(&str, String)> {
 120        let labels = vec![
 121            (
 122                "devcontainer.local_folder",
 123                (self.local_project_directory.display()).to_string(),
 124            ),
 125            (
 126                "devcontainer.config_file",
 127                (self.config_file().display()).to_string(),
 128            ),
 129        ];
 130        labels
 131    }
 132
 133    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
 134        let mut replaced_content = content
 135            .replace("${devcontainerId}", &self.devcontainer_id())
 136            .replace(
 137                "${containerWorkspaceFolderBasename}",
 138                &self.remote_workspace_base_name().unwrap_or_default(),
 139            )
 140            .replace(
 141                "${localWorkspaceFolderBasename}",
 142                &self.local_workspace_base_name()?,
 143            )
 144            .replace(
 145                "${containerWorkspaceFolder}",
 146                &self
 147                    .remote_workspace_folder()
 148                    .map(|path| path.display().to_string())
 149                    .unwrap_or_default()
 150                    .replace('\\', "/"),
 151            )
 152            .replace(
 153                "${localWorkspaceFolder}",
 154                &self.local_workspace_folder().replace('\\', "/"),
 155            );
 156        for (k, v) in &self.local_environment {
 157            let find = format!("${{localEnv:{k}}}");
 158            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
 159        }
 160
 161        Ok(replaced_content)
 162    }
 163
 164    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 165        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 166        let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
 167
 168        self.config = ConfigStatus::VariableParsed(parsed_config);
 169
 170        Ok(())
 171    }
 172
 173    fn runtime_remote_env(
 174        &self,
 175        container_env: &HashMap<String, String>,
 176    ) -> Result<HashMap<String, String>, DevContainerError> {
 177        let mut merged_remote_env = container_env.clone();
 178        // HOME is user-specific, and we will often not run as the image user
 179        merged_remote_env.remove("HOME");
 180        if let Some(remote_env) = self.dev_container().remote_env.clone() {
 181            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
 182                log::error!(
 183                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
 184                    remote_env
 185                );
 186                DevContainerError::DevContainerParseFailed
 187            })?;
 188            for (k, v) in container_env {
 189                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
 190            }
 191            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
 192                .map_err(|e| {
 193                    log::error!(
 194                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
 195                        &raw
 196                    );
 197                    DevContainerError::DevContainerParseFailed
 198                })?;
 199            for (k, v) in reserialized {
 200                merged_remote_env.insert(k, v);
 201            }
 202        }
 203        Ok(merged_remote_env)
 204    }
 205
 206    fn config_file(&self) -> PathBuf {
 207        self.config_directory.join(&self.file_name)
 208    }
 209
 210    fn dev_container(&self) -> &DevContainer {
 211        match &self.config {
 212            ConfigStatus::Deserialized(dev_container) => dev_container,
 213            ConfigStatus::VariableParsed(dev_container) => dev_container,
 214        }
 215    }
 216
 217    async fn dockerfile_location(&self) -> Option<PathBuf> {
 218        let dev_container = self.dev_container();
 219        match dev_container.build_type() {
 220            DevContainerBuildType::Image => None,
 221            DevContainerBuildType::Dockerfile => dev_container
 222                .build
 223                .as_ref()
 224                .map(|build| self.config_directory.join(&build.dockerfile)),
 225            DevContainerBuildType::DockerCompose => {
 226                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 227                    return None;
 228                };
 229                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 230                else {
 231                    return None;
 232                };
 233                main_service
 234                    .build
 235                    .and_then(|b| b.dockerfile)
 236                    .map(|dockerfile| self.config_directory.join(dockerfile))
 237            }
 238            DevContainerBuildType::None => None,
 239        }
 240    }
 241
 242    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 243        let mut hasher = DefaultHasher::new();
 244        let prefix = match &self.dev_container().name {
 245            Some(name) => &safe_id_lower(name),
 246            None => "zed-dc",
 247        };
 248        let prefix = prefix.get(..6).unwrap_or(prefix);
 249
 250        dockerfile_build_path.hash(&mut hasher);
 251
 252        let hash = hasher.finish();
 253        format!("{}-{:x}-features", prefix, hash)
 254    }
 255
 256    /// Gets the base image from the devcontainer with the following precedence:
 257    /// - The devcontainer image if an image is specified
 258    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 259    /// - The image sourced in the docker-compose main service, if one is specified
 260    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 261    /// If no such image is available, return an error
 262    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 263        if let Some(image) = &self.dev_container().image {
 264            return Ok(image.to_string());
 265        }
 266        if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
 267            let dockerfile_contents = self
 268                .fs
 269                .load(&self.config_directory.join(dockerfile))
 270                .await
 271                .map_err(|e| {
 272                    log::error!("Error reading dockerfile: {e}");
 273                    DevContainerError::DevContainerParseFailed
 274                })?;
 275            return image_from_dockerfile(self, dockerfile_contents);
 276        }
 277        if self.dev_container().docker_compose_file.is_some() {
 278            let docker_compose_manifest = self.docker_compose_manifest().await?;
 279            let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 280
 281            if let Some(dockerfile) = main_service
 282                .build
 283                .as_ref()
 284                .and_then(|b| b.dockerfile.as_ref())
 285            {
 286                let dockerfile_contents = self
 287                    .fs
 288                    .load(&self.config_directory.join(dockerfile))
 289                    .await
 290                    .map_err(|e| {
 291                        log::error!("Error reading dockerfile: {e}");
 292                        DevContainerError::DevContainerParseFailed
 293                    })?;
 294                return image_from_dockerfile(self, dockerfile_contents);
 295            }
 296            if let Some(image) = &main_service.image {
 297                return Ok(image.to_string());
 298            }
 299
 300            log::error!("No valid base image found in docker-compose configuration");
 301            return Err(DevContainerError::DevContainerParseFailed);
 302        }
 303        log::error!("No valid base image found in dev container configuration");
 304        Err(DevContainerError::DevContainerParseFailed)
 305    }
 306
    /// Downloads every enabled OCI feature and writes all build inputs —
    /// feature content, env files, install wrappers, and the extended
    /// Dockerfile — into a temp directory, recording results in
    /// `self.root_image`, `self.features`, and `self.features_build_info`.
    ///
    /// Requires the config to already be in the `VariableParsed` state.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image the features build will extend.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch directory so successive builds don't collide.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        // The image tag is derived from the Dockerfile.extended path so it is
        // unique per scratch directory.
        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // No features configured is treated the same as an empty feature map.
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Builtin env consumed by feature install scripts.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor any user-specified install ordering override.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        // Download each enabled feature and stage its install materials.
        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` disables the feature entirely.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Index suffix keeps directories unique even for repeated ids.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Token -> manifest -> first-layer tarball, per OCI distribution.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is downloaded; feature artifacts ship as a
            // single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // A feature without its metadata file is unusable — hard error.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also gets non-remote variable expansion.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Per-feature env file plus a wrapper script that sources it and
            // runs the feature's installer.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        // Compose builds only use BuildKit syntax when the client supports it;
        // plain docker builds always do.
        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Best-effort: a missing user Dockerfile just yields None.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 542
    /// Renders the `Dockerfile.extended` contents: the user's Dockerfile (if
    /// any), the feature-content staging stages, one install layer per
    /// downloaded feature, and the final target stage.
    ///
    /// `use_buildkit` selects between BuildKit build-context COPY paths and a
    /// legacy named-stage fallback.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // Updating the remote user's UID is a Unix-only concern; default on.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell commands that resolve each user's passwd entry (field 6 is
        // the home directory, extracted in the RUN below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the user Dockerfile's stage has an alias we can build FROM;
        // inject one when the author didn't provide any.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit an explicit named stage is needed to COPY feature
        // content from.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        // BuildKit copies from the build context; legacy copies from the
        // staged /tmp path.
        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    // NOTE(review): appended without a leading newline —
                    // relies on the preceding fragment ending in '\n'; confirm
                    // generate_dockerfile_env guarantees that.
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 644
 645    fn build_merged_resources(
 646        &self,
 647        base_image: DockerInspect,
 648    ) -> Result<DockerBuildResources, DevContainerError> {
 649        let dev_container = match &self.config {
 650            ConfigStatus::Deserialized(_) => {
 651                log::error!(
 652                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 653                );
 654                return Err(DevContainerError::DevContainerParseFailed);
 655            }
 656            ConfigStatus::VariableParsed(dev_container) => dev_container,
 657        };
 658        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 659
 660        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 661
 662        mounts.append(&mut feature_mounts);
 663
 664        let privileged = dev_container.privileged.unwrap_or(false)
 665            || self.features.iter().any(|f| f.privileged());
 666
 667        let mut entrypoint_script_lines = vec![
 668            "echo Container started".to_string(),
 669            "trap \"exit 0\" 15".to_string(),
 670        ];
 671
 672        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 673            entrypoint_script_lines.push(entrypoint.clone());
 674        }
 675        entrypoint_script_lines.append(&mut vec![
 676            "exec \"$@\"".to_string(),
 677            "while sleep 1 & wait $!; do :; done".to_string(),
 678        ]);
 679
 680        Ok(DockerBuildResources {
 681            image: base_image,
 682            additional_mounts: mounts,
 683            privileged,
 684            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 685        })
 686    }
 687
 688    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 689        if let ConfigStatus::Deserialized(_) = &self.config {
 690            log::error!(
 691                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 692            );
 693            return Err(DevContainerError::DevContainerParseFailed);
 694        }
 695        let dev_container = self.dev_container();
 696        match dev_container.build_type() {
 697            DevContainerBuildType::Image => {
 698                let built_docker_image = self.build_docker_image().await?;
 699                let Some(base_image) = dev_container.image.as_ref() else {
 700                    log::error!("Dev container is using and image which can't be referenced");
 701                    return Err(DevContainerError::DevContainerParseFailed);
 702                };
 703                let built_docker_image = self
 704                    .update_remote_user_uid(built_docker_image, base_image)
 705                    .await?;
 706
 707                let resources = self.build_merged_resources(built_docker_image)?;
 708                Ok(DevContainerBuildResources::Docker(resources))
 709            }
 710            DevContainerBuildType::Dockerfile => {
 711                let built_docker_image = self.build_docker_image().await?;
 712                let Some(features_build_info) = &self.features_build_info else {
 713                    log::error!(
 714                        "Can't attempt to build update UID dockerfile before initial docker build"
 715                    );
 716                    return Err(DevContainerError::DevContainerParseFailed);
 717                };
 718                let built_docker_image = self
 719                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 720                    .await?;
 721
 722                let resources = self.build_merged_resources(built_docker_image)?;
 723                Ok(DevContainerBuildResources::Docker(resources))
 724            }
 725            DevContainerBuildType::DockerCompose => {
 726                log::debug!("Using docker compose. Building extended compose files");
 727                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 728
 729                return Ok(DevContainerBuildResources::DockerCompose(
 730                    docker_compose_resources,
 731                ));
 732            }
 733            DevContainerBuildType::None => {
 734                return Err(DevContainerError::DevContainerParseFailed);
 735            }
 736        }
 737    }
 738
 739    async fn run_dev_container(
 740        &self,
 741        build_resources: DevContainerBuildResources,
 742    ) -> Result<DevContainerUp, DevContainerError> {
 743        let ConfigStatus::VariableParsed(_) = &self.config else {
 744            log::error!(
 745                "Variables have not been parsed; cannot proceed with running the dev container"
 746            );
 747            return Err(DevContainerError::DevContainerParseFailed);
 748        };
 749        let running_container = match build_resources {
 750            DevContainerBuildResources::DockerCompose(resources) => {
 751                self.run_docker_compose(resources).await?
 752            }
 753            DevContainerBuildResources::Docker(resources) => {
 754                self.run_docker_image(resources).await?
 755            }
 756        };
 757
 758        let remote_user = get_remote_user_from_config(&running_container, self)?;
 759        let remote_workspace_folder = get_remote_dir_from_config(
 760            &running_container,
 761            (&self.local_project_directory.display()).to_string(),
 762        )?;
 763
 764        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 765
 766        Ok(DevContainerUp {
 767            container_id: running_container.id,
 768            remote_user,
 769            remote_workspace_folder,
 770            extension_ids: self.extension_ids(),
 771            remote_env,
 772        })
 773    }
 774
 775    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 776        let dev_container = match &self.config {
 777            ConfigStatus::Deserialized(_) => {
 778                log::error!(
 779                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 780                );
 781                return Err(DevContainerError::DevContainerParseFailed);
 782            }
 783            ConfigStatus::VariableParsed(dev_container) => dev_container,
 784        };
 785        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 786            return Err(DevContainerError::DevContainerParseFailed);
 787        };
 788        let docker_compose_full_paths = docker_compose_files
 789            .iter()
 790            .map(|relative| self.config_directory.join(relative))
 791            .collect::<Vec<PathBuf>>();
 792
 793        let Some(config) = self
 794            .docker_client
 795            .get_docker_compose_config(&docker_compose_full_paths)
 796            .await?
 797        else {
 798            log::error!("Output could not deserialize into DockerComposeConfig");
 799            return Err(DevContainerError::DevContainerParseFailed);
 800        };
 801        Ok(DockerComposeResources {
 802            files: docker_compose_full_paths,
 803            config,
 804        })
 805    }
 806
 807    async fn build_and_extend_compose_files(
 808        &self,
 809    ) -> Result<DockerComposeResources, DevContainerError> {
 810        let dev_container = match &self.config {
 811            ConfigStatus::Deserialized(_) => {
 812                log::error!(
 813                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
 814                );
 815                return Err(DevContainerError::DevContainerParseFailed);
 816            }
 817            ConfigStatus::VariableParsed(dev_container) => dev_container,
 818        };
 819
 820        let Some(features_build_info) = &self.features_build_info else {
 821            log::error!(
 822                "Cannot build and extend compose files: features build info is not yet constructed"
 823            );
 824            return Err(DevContainerError::DevContainerParseFailed);
 825        };
 826        let mut docker_compose_resources = self.docker_compose_manifest().await?;
 827        let supports_buildkit = self.docker_client.supports_compose_buildkit();
 828
 829        let (main_service_name, main_service) =
 830            find_primary_service(&docker_compose_resources, self)?;
 831        let (built_service_image, built_service_image_tag) = if main_service
 832            .build
 833            .as_ref()
 834            .map(|b| b.dockerfile.as_ref())
 835            .is_some()
 836        {
 837            if !supports_buildkit {
 838                self.build_feature_content_image().await?;
 839            }
 840
 841            let dockerfile_path = &features_build_info.dockerfile_path;
 842
 843            let build_args = if !supports_buildkit {
 844                HashMap::from([
 845                    (
 846                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 847                        "dev_container_auto_added_stage_label".to_string(),
 848                    ),
 849                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 850                ])
 851            } else {
 852                HashMap::from([
 853                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 854                    (
 855                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 856                        "dev_container_auto_added_stage_label".to_string(),
 857                    ),
 858                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 859                ])
 860            };
 861
 862            let additional_contexts = if !supports_buildkit {
 863                None
 864            } else {
 865                Some(HashMap::from([(
 866                    "dev_containers_feature_content_source".to_string(),
 867                    features_build_info
 868                        .features_content_dir
 869                        .display()
 870                        .to_string(),
 871                )]))
 872            };
 873
 874            let build_override = DockerComposeConfig {
 875                name: None,
 876                services: HashMap::from([(
 877                    main_service_name.clone(),
 878                    DockerComposeService {
 879                        image: Some(features_build_info.image_tag.clone()),
 880                        entrypoint: None,
 881                        cap_add: None,
 882                        security_opt: None,
 883                        labels: None,
 884                        build: Some(DockerComposeServiceBuild {
 885                            context: Some(
 886                                main_service
 887                                    .build
 888                                    .as_ref()
 889                                    .and_then(|b| b.context.clone())
 890                                    .unwrap_or_else(|| {
 891                                        features_build_info.empty_context_dir.display().to_string()
 892                                    }),
 893                            ),
 894                            dockerfile: Some(dockerfile_path.display().to_string()),
 895                            args: Some(build_args),
 896                            additional_contexts,
 897                        }),
 898                        volumes: Vec::new(),
 899                        ..Default::default()
 900                    },
 901                )]),
 902                volumes: HashMap::new(),
 903            };
 904
 905            let temp_base = std::env::temp_dir().join("devcontainer-zed");
 906            let config_location = temp_base.join("docker_compose_build.json");
 907
 908            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 909                log::error!("Error serializing docker compose runtime override: {e}");
 910                DevContainerError::DevContainerParseFailed
 911            })?;
 912
 913            self.fs
 914                .write(&config_location, config_json.as_bytes())
 915                .await
 916                .map_err(|e| {
 917                    log::error!("Error writing the runtime override file: {e}");
 918                    DevContainerError::FilesystemError
 919                })?;
 920
 921            docker_compose_resources.files.push(config_location);
 922
 923            self.docker_client
 924                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
 925                .await?;
 926            (
 927                self.docker_client
 928                    .inspect(&features_build_info.image_tag)
 929                    .await?,
 930                &features_build_info.image_tag,
 931            )
 932        } else if let Some(image) = &main_service.image {
 933            if dev_container
 934                .features
 935                .as_ref()
 936                .is_none_or(|features| features.is_empty())
 937            {
 938                (self.docker_client.inspect(image).await?, image)
 939            } else {
 940                if !supports_buildkit {
 941                    self.build_feature_content_image().await?;
 942                }
 943
 944                let dockerfile_path = &features_build_info.dockerfile_path;
 945
 946                let build_args = if !supports_buildkit {
 947                    HashMap::from([
 948                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 949                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 950                    ])
 951                } else {
 952                    HashMap::from([
 953                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 954                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 955                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 956                    ])
 957                };
 958
 959                let additional_contexts = if !supports_buildkit {
 960                    None
 961                } else {
 962                    Some(HashMap::from([(
 963                        "dev_containers_feature_content_source".to_string(),
 964                        features_build_info
 965                            .features_content_dir
 966                            .display()
 967                            .to_string(),
 968                    )]))
 969                };
 970
 971                let build_override = DockerComposeConfig {
 972                    name: None,
 973                    services: HashMap::from([(
 974                        main_service_name.clone(),
 975                        DockerComposeService {
 976                            image: Some(features_build_info.image_tag.clone()),
 977                            entrypoint: None,
 978                            cap_add: None,
 979                            security_opt: None,
 980                            labels: None,
 981                            build: Some(DockerComposeServiceBuild {
 982                                context: Some(
 983                                    features_build_info.empty_context_dir.display().to_string(),
 984                                ),
 985                                dockerfile: Some(dockerfile_path.display().to_string()),
 986                                args: Some(build_args),
 987                                additional_contexts,
 988                            }),
 989                            volumes: Vec::new(),
 990                            ..Default::default()
 991                        },
 992                    )]),
 993                    volumes: HashMap::new(),
 994                };
 995
 996                let temp_base = std::env::temp_dir().join("devcontainer-zed");
 997                let config_location = temp_base.join("docker_compose_build.json");
 998
 999                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1000                    log::error!("Error serializing docker compose runtime override: {e}");
1001                    DevContainerError::DevContainerParseFailed
1002                })?;
1003
1004                self.fs
1005                    .write(&config_location, config_json.as_bytes())
1006                    .await
1007                    .map_err(|e| {
1008                        log::error!("Error writing the runtime override file: {e}");
1009                        DevContainerError::FilesystemError
1010                    })?;
1011
1012                docker_compose_resources.files.push(config_location);
1013
1014                self.docker_client
1015                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1016                    .await?;
1017
1018                (
1019                    self.docker_client
1020                        .inspect(&features_build_info.image_tag)
1021                        .await?,
1022                    &features_build_info.image_tag,
1023                )
1024            }
1025        } else {
1026            log::error!("Docker compose must have either image or dockerfile defined");
1027            return Err(DevContainerError::DevContainerParseFailed);
1028        };
1029
1030        let built_service_image = self
1031            .update_remote_user_uid(built_service_image, built_service_image_tag)
1032            .await?;
1033
1034        let resources = self.build_merged_resources(built_service_image)?;
1035
1036        let network_mode = main_service.network_mode.as_ref();
1037        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1038        let runtime_override_file = self
1039            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1040            .await?;
1041
1042        docker_compose_resources.files.push(runtime_override_file);
1043
1044        Ok(docker_compose_resources)
1045    }
1046
1047    async fn write_runtime_override_file(
1048        &self,
1049        main_service_name: &str,
1050        network_mode_service: Option<&str>,
1051        resources: DockerBuildResources,
1052    ) -> Result<PathBuf, DevContainerError> {
1053        let config =
1054            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1055        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1056        let config_location = temp_base.join("docker_compose_runtime.json");
1057
1058        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1059            log::error!("Error serializing docker compose runtime override: {e}");
1060            DevContainerError::DevContainerParseFailed
1061        })?;
1062
1063        self.fs
1064            .write(&config_location, config_json.as_bytes())
1065            .await
1066            .map_err(|e| {
1067                log::error!("Error writing the runtime override file: {e}");
1068                DevContainerError::FilesystemError
1069            })?;
1070
1071        Ok(config_location)
1072    }
1073
1074    fn build_runtime_override(
1075        &self,
1076        main_service_name: &str,
1077        network_mode_service: Option<&str>,
1078        resources: DockerBuildResources,
1079    ) -> Result<DockerComposeConfig, DevContainerError> {
1080        let mut runtime_labels = HashMap::new();
1081
1082        if let Some(metadata) = &resources.image.config.labels.metadata {
1083            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1084                log::error!("Error serializing docker image metadata: {e}");
1085                DevContainerError::ContainerNotValid(resources.image.id.clone())
1086            })?;
1087
1088            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1089        }
1090
1091        for (k, v) in self.identifying_labels() {
1092            runtime_labels.insert(k.to_string(), v.to_string());
1093        }
1094
1095        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1096            .additional_mounts
1097            .iter()
1098            .filter_map(|mount| {
1099                if let Some(mount_type) = &mount.mount_type
1100                    && mount_type.to_lowercase() == "volume"
1101                    && let Some(source) = &mount.source
1102                {
1103                    Some((
1104                        source.clone(),
1105                        DockerComposeVolume {
1106                            name: source.clone(),
1107                        },
1108                    ))
1109                } else {
1110                    None
1111                }
1112            })
1113            .collect();
1114
1115        let volumes: Vec<MountDefinition> = resources
1116            .additional_mounts
1117            .iter()
1118            .map(|v| MountDefinition {
1119                source: v.source.clone(),
1120                target: v.target.clone(),
1121                mount_type: v.mount_type.clone(),
1122            })
1123            .collect();
1124
1125        let mut main_service = DockerComposeService {
1126            entrypoint: Some(vec![
1127                "/bin/sh".to_string(),
1128                "-c".to_string(),
1129                resources.entrypoint_script,
1130                "-".to_string(),
1131            ]),
1132            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1133            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1134            labels: Some(runtime_labels),
1135            volumes,
1136            privileged: Some(resources.privileged),
1137            ..Default::default()
1138        };
1139        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1140        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1141        if let Some(forward_ports) = &self.dev_container().forward_ports {
1142            let main_service_ports: Vec<String> = forward_ports
1143                .iter()
1144                .filter_map(|f| match f {
1145                    ForwardPort::Number(port) => Some(port.to_string()),
1146                    ForwardPort::String(port) => {
1147                        let parts: Vec<&str> = port.split(":").collect();
1148                        if parts.len() <= 1 {
1149                            Some(port.to_string())
1150                        } else if parts.len() == 2 {
1151                            if parts[0] == main_service_name {
1152                                Some(parts[1].to_string())
1153                            } else {
1154                                None
1155                            }
1156                        } else {
1157                            None
1158                        }
1159                    }
1160                })
1161                .collect();
1162            for port in main_service_ports {
1163                // If the main service uses a different service's network bridge, append to that service's ports instead
1164                if let Some(network_service_name) = network_mode_service {
1165                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1166                        service.ports.push(DockerComposeServicePort {
1167                            target: port.clone(),
1168                            published: port.clone(),
1169                            ..Default::default()
1170                        });
1171                    } else {
1172                        service_declarations.insert(
1173                            network_service_name.to_string(),
1174                            DockerComposeService {
1175                                ports: vec![DockerComposeServicePort {
1176                                    target: port.clone(),
1177                                    published: port.clone(),
1178                                    ..Default::default()
1179                                }],
1180                                ..Default::default()
1181                            },
1182                        );
1183                    }
1184                } else {
1185                    main_service.ports.push(DockerComposeServicePort {
1186                        target: port.clone(),
1187                        published: port.clone(),
1188                        ..Default::default()
1189                    });
1190                }
1191            }
1192            let other_service_ports: Vec<(&str, &str)> = forward_ports
1193                .iter()
1194                .filter_map(|f| match f {
1195                    ForwardPort::Number(_) => None,
1196                    ForwardPort::String(port) => {
1197                        let parts: Vec<&str> = port.split(":").collect();
1198                        if parts.len() != 2 {
1199                            None
1200                        } else {
1201                            if parts[0] == main_service_name {
1202                                None
1203                            } else {
1204                                Some((parts[0], parts[1]))
1205                            }
1206                        }
1207                    }
1208                })
1209                .collect();
1210            for (service_name, port) in other_service_ports {
1211                if let Some(service) = service_declarations.get_mut(service_name) {
1212                    service.ports.push(DockerComposeServicePort {
1213                        target: port.to_string(),
1214                        published: port.to_string(),
1215                        ..Default::default()
1216                    });
1217                } else {
1218                    service_declarations.insert(
1219                        service_name.to_string(),
1220                        DockerComposeService {
1221                            ports: vec![DockerComposeServicePort {
1222                                target: port.to_string(),
1223                                published: port.to_string(),
1224                                ..Default::default()
1225                            }],
1226                            ..Default::default()
1227                        },
1228                    );
1229                }
1230            }
1231        }
1232
1233        service_declarations.insert(main_service_name.to_string(), main_service);
1234        let new_docker_compose_config = DockerComposeConfig {
1235            name: None,
1236            services: service_declarations,
1237            volumes: config_volumes,
1238        };
1239
1240        Ok(new_docker_compose_config)
1241    }
1242
1243    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1244        let dev_container = match &self.config {
1245            ConfigStatus::Deserialized(_) => {
1246                log::error!(
1247                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1248                );
1249                return Err(DevContainerError::DevContainerParseFailed);
1250            }
1251            ConfigStatus::VariableParsed(dev_container) => dev_container,
1252        };
1253
1254        match dev_container.build_type() {
1255            DevContainerBuildType::Image => {
1256                let Some(image_tag) = &dev_container.image else {
1257                    return Err(DevContainerError::DevContainerParseFailed);
1258                };
1259                let base_image = self.docker_client.inspect(image_tag).await?;
1260                if dev_container
1261                    .features
1262                    .as_ref()
1263                    .is_none_or(|features| features.is_empty())
1264                {
1265                    log::debug!("No features to add. Using base image");
1266                    return Ok(base_image);
1267                }
1268            }
1269            DevContainerBuildType::Dockerfile => {}
1270            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1271                return Err(DevContainerError::DevContainerParseFailed);
1272            }
1273        };
1274
1275        let mut command = self.create_docker_build()?;
1276
1277        let output = self
1278            .command_runner
1279            .run_command(&mut command)
1280            .await
1281            .map_err(|e| {
1282                log::error!("Error building docker image: {e}");
1283                DevContainerError::CommandFailed(command.get_program().display().to_string())
1284            })?;
1285
1286        if !output.status.success() {
1287            let stderr = String::from_utf8_lossy(&output.stderr);
1288            log::error!("docker buildx build failed: {stderr}");
1289            return Err(DevContainerError::CommandFailed(
1290                command.get_program().display().to_string(),
1291            ));
1292        }
1293
1294        // After a successful build, inspect the newly tagged image to get its metadata
1295        let Some(features_build_info) = &self.features_build_info else {
1296            log::error!("Features build info expected, but not created");
1297            return Err(DevContainerError::DevContainerParseFailed);
1298        };
1299        let image = self
1300            .docker_client
1301            .inspect(&features_build_info.image_tag)
1302            .await?;
1303
1304        Ok(image)
1305    }
1306
    /// No-op on Windows: host UID/GID remapping only applies to Unix hosts,
    /// so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1315    #[cfg(not(target_os = "windows"))]
1316    async fn update_remote_user_uid(
1317        &self,
1318        image: DockerInspect,
1319        base_image: &str,
1320    ) -> Result<DockerInspect, DevContainerError> {
1321        let dev_container = self.dev_container();
1322
1323        let Some(features_build_info) = &self.features_build_info else {
1324            return Ok(image);
1325        };
1326
1327        // updateRemoteUserUID defaults to true per the devcontainers spec
1328        if dev_container.update_remote_user_uid == Some(false) {
1329            return Ok(image);
1330        }
1331
1332        let remote_user = get_remote_user_from_config(&image, self)?;
1333        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1334            return Ok(image);
1335        }
1336
1337        let image_user = image
1338            .config
1339            .image_user
1340            .as_deref()
1341            .unwrap_or("root")
1342            .to_string();
1343
1344        let host_uid = Command::new("id")
1345            .arg("-u")
1346            .output()
1347            .await
1348            .map_err(|e| {
1349                log::error!("Failed to get host UID: {e}");
1350                DevContainerError::CommandFailed("id -u".to_string())
1351            })
1352            .and_then(|output| {
1353                String::from_utf8_lossy(&output.stdout)
1354                    .trim()
1355                    .parse::<u32>()
1356                    .map_err(|e| {
1357                        log::error!("Failed to parse host UID: {e}");
1358                        DevContainerError::CommandFailed("id -u".to_string())
1359                    })
1360            })?;
1361
1362        let host_gid = Command::new("id")
1363            .arg("-g")
1364            .output()
1365            .await
1366            .map_err(|e| {
1367                log::error!("Failed to get host GID: {e}");
1368                DevContainerError::CommandFailed("id -g".to_string())
1369            })
1370            .and_then(|output| {
1371                String::from_utf8_lossy(&output.stdout)
1372                    .trim()
1373                    .parse::<u32>()
1374                    .map_err(|e| {
1375                        log::error!("Failed to parse host GID: {e}");
1376                        DevContainerError::CommandFailed("id -g".to_string())
1377                    })
1378            })?;
1379
1380        let dockerfile_content = self.generate_update_uid_dockerfile();
1381
1382        let dockerfile_path = features_build_info
1383            .features_content_dir
1384            .join("updateUID.Dockerfile");
1385        self.fs
1386            .write(&dockerfile_path, dockerfile_content.as_bytes())
1387            .await
1388            .map_err(|e| {
1389                log::error!("Failed to write updateUID Dockerfile: {e}");
1390                DevContainerError::FilesystemError
1391            })?;
1392
1393        let updated_image_tag = format!("{}-uid", features_build_info.image_tag);
1394
1395        let mut command = Command::new(self.docker_client.docker_cli());
1396        command.args(["build"]);
1397        command.args(["-f", &dockerfile_path.display().to_string()]);
1398        command.args(["-t", &updated_image_tag]);
1399        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1400        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1401        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1402        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1403        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1404        command.arg(features_build_info.empty_context_dir.display().to_string());
1405
1406        let output = self
1407            .command_runner
1408            .run_command(&mut command)
1409            .await
1410            .map_err(|e| {
1411                log::error!("Error building UID update image: {e}");
1412                DevContainerError::CommandFailed(command.get_program().display().to_string())
1413            })?;
1414
1415        if !output.status.success() {
1416            let stderr = String::from_utf8_lossy(&output.stderr);
1417            log::error!("UID update build failed: {stderr}");
1418            return Err(DevContainerError::CommandFailed(
1419                command.get_program().display().to_string(),
1420            ));
1421        }
1422
1423        self.docker_client.inspect(&updated_image_tag).await
1424    }
1425
1426    #[cfg(not(target_os = "windows"))]
1427    fn generate_update_uid_dockerfile(&self) -> String {
1428        let mut dockerfile = r#"ARG BASE_IMAGE
1429FROM $BASE_IMAGE
1430
1431USER root
1432
1433ARG REMOTE_USER
1434ARG NEW_UID
1435ARG NEW_GID
1436SHELL ["/bin/sh", "-c"]
1437RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1438	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1439	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1440	if [ -z "$OLD_UID" ]; then \
1441		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1442	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1443		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1444	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1445		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1446	else \
1447		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1448			FREE_GID=65532; \
1449			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1450			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1451			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1452		fi; \
1453		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1454		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1455		if [ "$OLD_GID" != "$NEW_GID" ]; then \
1456			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1457		fi; \
1458		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1459	fi;
1460
1461ARG IMAGE_USER
1462USER $IMAGE_USER
1463
1464# Ensure that /etc/profile does not clobber the existing path
1465RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1466"#.to_string();
1467        for feature in &self.features {
1468            let container_env_layer = feature.generate_dockerfile_env();
1469            dockerfile = format!("{dockerfile}\n{container_env_layer}");
1470        }
1471
1472        if let Some(env) = &self.dev_container().container_env {
1473            for (key, value) in env {
1474                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1475            }
1476        }
1477        dockerfile
1478    }
1479
1480    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1481        let Some(features_build_info) = &self.features_build_info else {
1482            log::error!("Features build info not available for building feature content image");
1483            return Err(DevContainerError::DevContainerParseFailed);
1484        };
1485        let features_content_dir = &features_build_info.features_content_dir;
1486
1487        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1488        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1489
1490        self.fs
1491            .write(&dockerfile_path, dockerfile_content.as_bytes())
1492            .await
1493            .map_err(|e| {
1494                log::error!("Failed to write feature content Dockerfile: {e}");
1495                DevContainerError::FilesystemError
1496            })?;
1497
1498        let mut command = Command::new(self.docker_client.docker_cli());
1499        command.args([
1500            "build",
1501            "-t",
1502            "dev_container_feature_content_temp",
1503            "-f",
1504            &dockerfile_path.display().to_string(),
1505            &features_content_dir.display().to_string(),
1506        ]);
1507
1508        let output = self
1509            .command_runner
1510            .run_command(&mut command)
1511            .await
1512            .map_err(|e| {
1513                log::error!("Error building feature content image: {e}");
1514                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1515            })?;
1516
1517        if !output.status.success() {
1518            let stderr = String::from_utf8_lossy(&output.stderr);
1519            log::error!("Feature content image build failed: {stderr}");
1520            return Err(DevContainerError::CommandFailed(
1521                self.docker_client.docker_cli(),
1522            ));
1523        }
1524
1525        Ok(())
1526    }
1527
1528    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1529        let dev_container = match &self.config {
1530            ConfigStatus::Deserialized(_) => {
1531                log::error!(
1532                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1533                );
1534                return Err(DevContainerError::DevContainerParseFailed);
1535            }
1536            ConfigStatus::VariableParsed(dev_container) => dev_container,
1537        };
1538
1539        let Some(features_build_info) = &self.features_build_info else {
1540            log::error!(
1541                "Cannot create docker build command; features build info has not been constructed"
1542            );
1543            return Err(DevContainerError::DevContainerParseFailed);
1544        };
1545        let mut command = Command::new(self.docker_client.docker_cli());
1546
1547        command.args(["buildx", "build"]);
1548
1549        // --load is short for --output=docker, loading the built image into the local docker images
1550        command.arg("--load");
1551
1552        // BuildKit build context: provides the features content directory as a named context
1553        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1554        command.args([
1555            "--build-context",
1556            &format!(
1557                "dev_containers_feature_content_source={}",
1558                features_build_info.features_content_dir.display()
1559            ),
1560        ]);
1561
1562        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1563        if let Some(build_image) = &features_build_info.build_image {
1564            command.args([
1565                "--build-arg",
1566                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1567            ]);
1568        } else {
1569            command.args([
1570                "--build-arg",
1571                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1572            ]);
1573        }
1574
1575        command.args([
1576            "--build-arg",
1577            &format!(
1578                "_DEV_CONTAINERS_IMAGE_USER={}",
1579                self.root_image
1580                    .as_ref()
1581                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1582                    .unwrap_or(&"root".to_string())
1583            ),
1584        ]);
1585
1586        command.args([
1587            "--build-arg",
1588            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1589        ]);
1590
1591        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1592            for (key, value) in args {
1593                command.args(["--build-arg", &format!("{}={}", key, value)]);
1594            }
1595        }
1596
1597        command.args(["--target", "dev_containers_target_stage"]);
1598
1599        command.args([
1600            "-f",
1601            &features_build_info.dockerfile_path.display().to_string(),
1602        ]);
1603
1604        command.args(["-t", &features_build_info.image_tag]);
1605
1606        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
1607            command.arg(self.config_directory.display().to_string());
1608        } else {
1609            // Use an empty folder as the build context to avoid pulling in unneeded files.
1610            // The actual feature content is supplied via the BuildKit build context above.
1611            command.arg(features_build_info.empty_context_dir.display().to_string());
1612        }
1613
1614        Ok(command)
1615    }
1616
1617    async fn run_docker_compose(
1618        &self,
1619        resources: DockerComposeResources,
1620    ) -> Result<DockerInspect, DevContainerError> {
1621        let mut command = Command::new(self.docker_client.docker_cli());
1622        command.args(&["compose", "--project-name", &self.project_name()]);
1623        for docker_compose_file in resources.files {
1624            command.args(&["-f", &docker_compose_file.display().to_string()]);
1625        }
1626        command.args(&["up", "-d"]);
1627
1628        let output = self
1629            .command_runner
1630            .run_command(&mut command)
1631            .await
1632            .map_err(|e| {
1633                log::error!("Error running docker compose up: {e}");
1634                DevContainerError::CommandFailed(command.get_program().display().to_string())
1635            })?;
1636
1637        if !output.status.success() {
1638            let stderr = String::from_utf8_lossy(&output.stderr);
1639            log::error!("Non-success status from docker compose up: {}", stderr);
1640            return Err(DevContainerError::CommandFailed(
1641                command.get_program().display().to_string(),
1642            ));
1643        }
1644
1645        if let Some(docker_ps) = self.check_for_existing_container().await? {
1646            log::debug!("Found newly created dev container");
1647            return self.docker_client.inspect(&docker_ps.id).await;
1648        }
1649
1650        log::error!("Could not find existing container after docker compose up");
1651
1652        Err(DevContainerError::DevContainerParseFailed)
1653    }
1654
1655    async fn run_docker_image(
1656        &self,
1657        build_resources: DockerBuildResources,
1658    ) -> Result<DockerInspect, DevContainerError> {
1659        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1660
1661        let output = self
1662            .command_runner
1663            .run_command(&mut docker_run_command)
1664            .await
1665            .map_err(|e| {
1666                log::error!("Error running docker run: {e}");
1667                DevContainerError::CommandFailed(
1668                    docker_run_command.get_program().display().to_string(),
1669                )
1670            })?;
1671
1672        if !output.status.success() {
1673            let std_err = String::from_utf8_lossy(&output.stderr);
1674            log::error!("Non-success status from docker run. StdErr: {std_err}");
1675            return Err(DevContainerError::CommandFailed(
1676                docker_run_command.get_program().display().to_string(),
1677            ));
1678        }
1679
1680        log::debug!("Checking for container that was started");
1681        let Some(docker_ps) = self.check_for_existing_container().await? else {
1682            log::error!("Could not locate container just created");
1683            return Err(DevContainerError::DevContainerParseFailed);
1684        };
1685        self.docker_client.inspect(&docker_ps.id).await
1686    }
1687
1688    fn local_workspace_folder(&self) -> String {
1689        self.local_project_directory.display().to_string()
1690    }
1691    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1692        self.local_project_directory
1693            .file_name()
1694            .map(|f| f.display().to_string())
1695            .ok_or(DevContainerError::DevContainerParseFailed)
1696    }
1697
1698    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1699        self.dev_container()
1700            .workspace_folder
1701            .as_ref()
1702            .map(|folder| PathBuf::from(folder))
1703            .or(Some(
1704                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1705            ))
1706            .ok_or(DevContainerError::DevContainerParseFailed)
1707    }
1708    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1709        self.remote_workspace_folder().and_then(|f| {
1710            f.file_name()
1711                .map(|file_name| file_name.display().to_string())
1712                .ok_or(DevContainerError::DevContainerParseFailed)
1713        })
1714    }
1715
1716    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1717        if let Some(mount) = &self.dev_container().workspace_mount {
1718            return Ok(mount.clone());
1719        }
1720        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1721            return Err(DevContainerError::DevContainerParseFailed);
1722        };
1723
1724        Ok(MountDefinition {
1725            source: Some(self.local_workspace_folder()),
1726            target: format!("/workspaces/{}", project_directory_name.display()),
1727            mount_type: None,
1728        })
1729    }
1730
    /// Builds the full `docker run` invocation for the built image:
    /// detached, with the workspace and additional mounts, identifying labels,
    /// serialized image metadata, forwarded ports, and an `sh -c` entrypoint
    /// running `entrypoint_script`.
    ///
    /// Errors when the workspace mount cannot be determined or when the image
    /// metadata label fails to serialize.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Podman-specific flags: disable SELinux labeling and keep the host
        // UID/GID mapping inside the user namespace.
        if &docker_cli == "podman" {
            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
        }

        // Detached, with signal proxying off; the workspace mount comes first,
        // followed by any additional mounts from the build resources.
        command.arg("--sig-proxy=false");
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to re-discover this container (see
        // `check_for_existing_container`).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata onto the container as a
        // `devcontainer.metadata` label.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Numeric forwardPorts entries publish host:container on the same port;
        // non-numeric entries are not handled here.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            // Should just implement display for an AppPort struct which takes care of this; it might be a custom map like (literally) "8081:8080"
            command.arg(app_port);
        }

        // Run the entrypoint script through `/bin/sh -c`, trailing `-` as $0.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1801
1802    fn extension_ids(&self) -> Vec<String> {
1803        self.dev_container()
1804            .customizations
1805            .as_ref()
1806            .map(|c| c.zed.extensions.clone())
1807            .unwrap_or_default()
1808    }
1809
1810    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1811        self.run_initialize_commands().await?;
1812
1813        self.download_feature_and_dockerfile_resources().await?;
1814
1815        let build_resources = self.build_resources().await?;
1816
1817        let devcontainer_up = self.run_dev_container(build_resources).await?;
1818
1819        self.run_remote_scripts(&devcontainer_up, true).await?;
1820
1821        Ok(devcontainer_up)
1822    }
1823
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// `docker exec`, from the remote workspace folder.
    ///
    /// When `new_container` is true, the create-time hooks run first:
    /// `onCreateCommand` and `updateContentCommand` as root, then
    /// `postCreateCommand` and `postStartCommand` as the remote user.
    /// `postAttachCommand` runs on every call, new container or not.
    ///
    /// Errors when variable expansion has not happened yet, or when any exec
    /// fails (execution stops at the first failure).
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // onCreateCommand: runs as root.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // updateContentCommand: also runs as root.
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // postCreateCommand: runs as the resolved remote user, not root.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // postStartCommand: runs as the remote user.
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand: runs on every attach, as the remote user.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1911
1912    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1913        let ConfigStatus::VariableParsed(config) = &self.config else {
1914            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1915            return Err(DevContainerError::DevContainerParseFailed);
1916        };
1917
1918        if let Some(initialize_command) = &config.initialize_command {
1919            log::debug!("Running initialize command");
1920            initialize_command
1921                .run(&self.command_runner, &self.local_project_directory)
1922                .await
1923        } else {
1924            log::warn!("No initialize command found");
1925            Ok(())
1926        }
1927    }
1928
1929    async fn check_for_existing_devcontainer(
1930        &self,
1931    ) -> Result<Option<DevContainerUp>, DevContainerError> {
1932        if let Some(docker_ps) = self.check_for_existing_container().await? {
1933            log::debug!("Dev container already found. Proceeding with it");
1934
1935            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1936
1937            if !docker_inspect.is_running() {
1938                log::debug!("Container not running. Will attempt to start, and then proceed");
1939                self.docker_client.start_container(&docker_ps.id).await?;
1940            }
1941
1942            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1943
1944            let remote_folder = get_remote_dir_from_config(
1945                &docker_inspect,
1946                (&self.local_project_directory.display()).to_string(),
1947            )?;
1948
1949            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1950
1951            let dev_container_up = DevContainerUp {
1952                container_id: docker_ps.id,
1953                remote_user: remote_user,
1954                remote_workspace_folder: remote_folder,
1955                extension_ids: self.extension_ids(),
1956                remote_env,
1957            };
1958
1959            self.run_remote_scripts(&dev_container_up, false).await?;
1960
1961            Ok(Some(dev_container_up))
1962        } else {
1963            log::debug!("Existing container not found.");
1964
1965            Ok(None)
1966        }
1967    }
1968
1969    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1970        self.docker_client
1971            .find_process_by_filters(
1972                self.identifying_labels()
1973                    .iter()
1974                    .map(|(k, v)| format!("label={k}={v}"))
1975                    .collect(),
1976            )
1977            .await
1978    }
1979
1980    fn project_name(&self) -> String {
1981        if let Some(name) = &self.dev_container().name {
1982            safe_id_lower(name)
1983        } else {
1984            let alternate_name = &self
1985                .local_workspace_base_name()
1986                .unwrap_or(self.local_workspace_folder());
1987            safe_id_lower(alternate_name)
1988        }
1989    }
1990}
1991
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
///
/// Built ahead of the feature build and consumed by `create_docker_build`.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context, so the
    /// image build pulls in no unrelated files
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` when no explicit base image is supplied, in which case the build
    /// falls back to the `dev_container_auto_added_stage_label` stage
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2010
2011pub(crate) async fn read_devcontainer_configuration(
2012    config: DevContainerConfig,
2013    context: &DevContainerContext,
2014    environment: HashMap<String, String>,
2015) -> Result<DevContainer, DevContainerError> {
2016    let docker = if context.use_podman {
2017        Docker::new("podman")
2018    } else {
2019        Docker::new("docker")
2020    };
2021    let mut dev_container = DevContainerManifest::new(
2022        context,
2023        environment,
2024        Arc::new(docker),
2025        Arc::new(DefaultCommandRunner::new()),
2026        config,
2027        &context.project_directory.as_ref(),
2028    )
2029    .await?;
2030    dev_container.parse_nonremote_vars()?;
2031    Ok(dev_container.dev_container().clone())
2032}
2033
2034pub(crate) async fn spawn_dev_container(
2035    context: &DevContainerContext,
2036    environment: HashMap<String, String>,
2037    config: DevContainerConfig,
2038    local_project_path: &Path,
2039) -> Result<DevContainerUp, DevContainerError> {
2040    let docker = if context.use_podman {
2041        Docker::new("podman")
2042    } else {
2043        Docker::new("docker")
2044    };
2045    let mut devcontainer_manifest = DevContainerManifest::new(
2046        context,
2047        environment,
2048        Arc::new(docker),
2049        Arc::new(DefaultCommandRunner::new()),
2050        config,
2051        local_project_path,
2052    )
2053    .await?;
2054
2055    devcontainer_manifest.parse_nonremote_vars()?;
2056
2057    log::debug!("Checking for existing container");
2058    if let Some(devcontainer) = devcontainer_manifest
2059        .check_for_existing_devcontainer()
2060        .await?
2061    {
2062        Ok(devcontainer)
2063    } else {
2064        log::debug!("Existing container not found. Building");
2065
2066        devcontainer_manifest.build_and_run().await
2067    }
2068}
2069
/// Everything `create_docker_run_command` needs to `docker run` the built image.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect output of the image to run; supplies the image id and metadata labels.
    image: DockerInspect,
    // Mounts beyond the workspace mount itself.
    additional_mounts: Vec<MountDefinition>,
    // Whether to pass `--privileged` to `docker run`.
    privileged: bool,
    // Script handed to the container's `/bin/sh -c` entrypoint.
    entrypoint_script: String,
}
2077
/// The two ways a dev container is brought up: through docker compose, or
/// directly through `docker run`.
#[derive(Debug)]
enum DevContainerBuildResources {
    // Compose files plus the parsed compose config.
    DockerCompose(DockerComposeResources),
    // Image and run options for a plain `docker run`.
    Docker(DockerBuildResources),
}
2083
2084fn find_primary_service(
2085    docker_compose: &DockerComposeResources,
2086    devcontainer: &DevContainerManifest,
2087) -> Result<(String, DockerComposeService), DevContainerError> {
2088    let Some(service_name) = &devcontainer.dev_container().service else {
2089        return Err(DevContainerError::DevContainerParseFailed);
2090    };
2091
2092    match docker_compose.config.services.get(service_name) {
2093        Some(service) => Ok((service_name.clone(), service.clone())),
2094        None => Err(DevContainerError::DevContainerParseFailed),
2095    }
2096}
2097
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// Absolute path, no trailing slash.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2101
/// Escapes regex special characters in a string.
///
/// Each character in `.*+?^${}()|[]\` is prefixed with a backslash; every
/// other character passes through unchanged.
fn escape_regex_chars(input: &str) -> String {
    let mut escaped = String::with_capacity(input.len() * 2);
    for c in input.chars() {
        let is_special = matches!(
            c,
            '.' | '*' | '+' | '?' | '^' | '$' | '{' | '}' | '(' | ')' | '|' | '[' | ']' | '\\'
        );
        if is_special {
            escaped.push('\\');
        }
        escaped.push(c);
    }
    escaped
}
2113
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Strip an `@digest` suffix; otherwise strip a `:version` suffix, but only
    // when the colon comes after the final slash (a colon before the last
    // slash would be a registry port, not a version).
    let base = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The ID is whatever follows the last path separator.
    base.rsplit('/').next().unwrap_or(base)
}
2137
2138/// Generates a shell command that looks up a user's passwd entry.
2139///
2140/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2141/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2142fn get_ent_passwd_shell_command(user: &str) -> String {
2143    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2144    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2145    format!(
2146        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2147        shell = escaped_for_shell,
2148        re = escaped_for_regex,
2149    )
2150}
2151
2152/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2153///
2154/// Features listed in the override come first (in the specified order), followed
2155/// by any remaining features sorted lexicographically by their full reference ID.
2156fn resolve_feature_order<'a>(
2157    features: &'a HashMap<String, FeatureOptions>,
2158    override_order: &Option<Vec<String>>,
2159) -> Vec<(&'a String, &'a FeatureOptions)> {
2160    if let Some(order) = override_order {
2161        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2162        for ordered_id in order {
2163            if let Some((key, options)) = features.get_key_value(ordered_id) {
2164                ordered.push((key, options));
2165            }
2166        }
2167        let mut remaining: Vec<_> = features
2168            .iter()
2169            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2170            .collect();
2171        remaining.sort_by_key(|(id, _)| id.as_str());
2172        ordered.extend(remaining);
2173        ordered
2174    } else {
2175        let mut entries: Vec<_> = features.iter().collect();
2176        entries.sort_by_key(|(id, _)| id.as_str());
2177        entries
2178    }
2179}
2180
2181/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
2182///
2183/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
2184/// `containerFeaturesConfiguration.ts`.
2185fn generate_install_wrapper(
2186    feature_ref: &str,
2187    feature_id: &str,
2188    env_variables: &str,
2189) -> Result<String, DevContainerError> {
2190    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
2191        log::error!("Error escaping feature ref {feature_ref}: {e}");
2192        DevContainerError::DevContainerParseFailed
2193    })?;
2194    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
2195        log::error!("Error escaping feature {feature_id}: {e}");
2196        DevContainerError::DevContainerParseFailed
2197    })?;
2198    let options_indented: String = env_variables
2199        .lines()
2200        .filter(|l| !l.is_empty())
2201        .map(|l| format!("    {}", l))
2202        .collect::<Vec<_>>()
2203        .join("\n");
2204    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
2205        log::error!("Error escaping options {options_indented}: {e}");
2206        DevContainerError::DevContainerParseFailed
2207    })?;
2208
2209    let script = format!(
2210        r#"#!/bin/sh
2211set -e
2212
2213on_exit () {{
2214    [ $? -eq 0 ] && exit
2215    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
2216}}
2217
2218trap on_exit EXIT
2219
2220echo ===========================================================================
2221echo 'Feature       : {escaped_name}'
2222echo 'Id            : {escaped_id}'
2223echo 'Options       :'
2224echo {escaped_options}
2225echo ===========================================================================
2226
2227set -a
2228. ../devcontainer-features.builtin.env
2229. ./devcontainer-features.env
2230set +a
2231
2232chmod +x ./install.sh
2233./install.sh
2234"#
2235    );
2236
2237    Ok(script)
2238}
2239
// Dockerfile actions need to be moved to their own file
/// Returns the build-stage alias of the first `FROM` line
/// (`FROM <image> AS <alias>`), if one is present.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    let from_line = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))?;
    let words: Vec<&str> = from_line.split(' ').collect();
    // The alias form needs at least `FROM <image> AS <alias>`: the
    // second-to-last word must be (case-insensitive) "AS".
    let keyword_position = words.len().checked_sub(2)?;
    if words.len() > 2 && words[keyword_position].to_lowercase() == "as" {
        words.last().map(|alias| (*alias).to_string())
    } else {
        None
    }
}
2254
2255fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2256    if dockerfile_alias(dockerfile_content).is_some() {
2257        dockerfile_content.to_string()
2258    } else {
2259        dockerfile_content
2260            .lines()
2261            .map(|line| {
2262                if line.starts_with("FROM") {
2263                    format!("{} AS {}", line, alias)
2264                } else {
2265                    line.to_string()
2266                }
2267            })
2268            .collect::<Vec<String>>()
2269            .join("\n")
2270    }
2271}
2272
2273fn image_from_dockerfile(
2274    devcontainer: &DevContainerManifest,
2275    dockerfile_contents: String,
2276) -> Result<String, DevContainerError> {
2277    let mut raw_contents = dockerfile_contents
2278        .lines()
2279        .find(|line| line.starts_with("FROM"))
2280        .and_then(|from_line| {
2281            from_line
2282                .split(' ')
2283                .collect::<Vec<&str>>()
2284                .get(1)
2285                .map(|s| s.to_string())
2286        })
2287        .ok_or_else(|| {
2288            log::error!("Could not find an image definition in dockerfile");
2289            DevContainerError::DevContainerParseFailed
2290        })?;
2291
2292    for (k, v) in devcontainer
2293        .dev_container()
2294        .build
2295        .as_ref()
2296        .and_then(|b| b.args.as_ref())
2297        .unwrap_or(&HashMap::new())
2298    {
2299        raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2300    }
2301    Ok(raw_contents)
2302}
2303
// Container/remote user resolution helpers.
// The precedence order should come from the Dev Container spec - see the docs
2306fn get_remote_user_from_config(
2307    docker_config: &DockerInspect,
2308    devcontainer: &DevContainerManifest,
2309) -> Result<String, DevContainerError> {
2310    if let DevContainer {
2311        remote_user: Some(user),
2312        ..
2313    } = &devcontainer.dev_container()
2314    {
2315        return Ok(user.clone());
2316    }
2317    if let Some(metadata) = &docker_config.config.labels.metadata {
2318        for metadatum in metadata {
2319            if let Some(remote_user) = metadatum.get("remoteUser") {
2320                if let Some(remote_user_str) = remote_user.as_str() {
2321                    return Ok(remote_user_str.to_string());
2322                }
2323            }
2324        }
2325    }
2326    if let Some(image_user) = &docker_config.config.image_user {
2327        if !image_user.is_empty() {
2328            return Ok(image_user.to_string());
2329        }
2330    }
2331    Ok("root".to_string())
2332}
2333
2334// This should come from spec - see the docs
2335fn get_container_user_from_config(
2336    docker_config: &DockerInspect,
2337    devcontainer: &DevContainerManifest,
2338) -> Result<String, DevContainerError> {
2339    if let Some(user) = &devcontainer.dev_container().container_user {
2340        return Ok(user.to_string());
2341    }
2342    if let Some(metadata) = &docker_config.config.labels.metadata {
2343        for metadatum in metadata {
2344            if let Some(container_user) = metadatum.get("containerUser") {
2345                if let Some(container_user_str) = container_user.as_str() {
2346                    return Ok(container_user_str.to_string());
2347                }
2348            }
2349        }
2350    }
2351    if let Some(image_user) = &docker_config.config.image_user {
2352        return Ok(image_user.to_string());
2353    }
2354
2355    Ok("root".to_string())
2356}
2357
2358#[cfg(test)]
2359mod test {
2360    use std::{
2361        collections::HashMap,
2362        ffi::OsStr,
2363        path::PathBuf,
2364        process::{ExitStatus, Output},
2365        sync::{Arc, Mutex},
2366    };
2367
2368    use async_trait::async_trait;
2369    use fs::{FakeFs, Fs};
2370    use gpui::{AppContext, TestAppContext};
2371    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2372    use project::{
2373        ProjectEnvironment,
2374        worktree_store::{WorktreeIdCounter, WorktreeStore},
2375    };
2376    use serde_json_lenient::Value;
2377    use util::{command::Command, paths::SanitizedPath};
2378
2379    #[cfg(not(target_os = "windows"))]
2380    use crate::docker::DockerComposeServicePort;
2381    use crate::{
2382        DevContainerConfig, DevContainerContext,
2383        command_json::CommandRunner,
2384        devcontainer_api::DevContainerError,
2385        devcontainer_json::MountDefinition,
2386        devcontainer_manifest::{
2387            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2388            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2389        },
2390        docker::{
2391            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2392            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2393            DockerPs,
2394        },
2395        oci::TokenResponse,
2396    };
    // Absolute path used as the fake local project root throughout these tests.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2398
2399    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2400        let buffer = futures::io::Cursor::new(Vec::new());
2401        let mut builder = async_tar::Builder::new(buffer);
2402        for (file_name, content) in content {
2403            if content.is_empty() {
2404                let mut header = async_tar::Header::new_gnu();
2405                header.set_size(0);
2406                header.set_mode(0o755);
2407                header.set_entry_type(async_tar::EntryType::Directory);
2408                header.set_cksum();
2409                builder
2410                    .append_data(&mut header, file_name, &[] as &[u8])
2411                    .await
2412                    .unwrap();
2413            } else {
2414                let data = content.as_bytes();
2415                let mut header = async_tar::Header::new_gnu();
2416                header.set_size(data.len() as u64);
2417                header.set_mode(0o755);
2418                header.set_entry_type(async_tar::EntryType::Regular);
2419                header.set_cksum();
2420                builder
2421                    .append_data(&mut header, file_name, data)
2422                    .await
2423                    .unwrap();
2424            }
2425        }
2426        let buffer = builder.into_inner().await.unwrap();
2427        buffer.into_inner()
2428    }
2429
2430    fn test_project_filename() -> String {
2431        PathBuf::from(TEST_PROJECT_PATH)
2432            .file_name()
2433            .expect("is valid")
2434            .display()
2435            .to_string()
2436    }
2437
2438    async fn init_devcontainer_config(
2439        fs: &Arc<FakeFs>,
2440        devcontainer_contents: &str,
2441    ) -> DevContainerConfig {
2442        fs.insert_tree(
2443            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2444            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2445        )
2446        .await;
2447
2448        DevContainerConfig::default_config()
2449    }
2450
    /// Test doubles handed back alongside the `DevContainerManifest` under
    /// test so individual tests can inspect or seed their state.
    struct TestDependencies {
        /// Fake in-memory filesystem shared with the manifest.
        fs: Arc<FakeFs>,
        /// Held to keep the stubbed HTTP client alive; the leading underscore
        /// silences unused-field warnings for tests that never touch it.
        _http_client: Arc<dyn HttpClient>,
        /// Fake docker client (see `FakeDocker`).
        docker: Arc<FakeDocker>,
        /// Stub command runner (see `TestCommandRunner`).
        command_runner: Arc<TestCommandRunner>,
    }
2457
2458    async fn init_default_devcontainer_manifest(
2459        cx: &mut TestAppContext,
2460        devcontainer_contents: &str,
2461    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2462        let fs = FakeFs::new(cx.executor());
2463        let http_client = fake_http_client();
2464        let command_runner = Arc::new(TestCommandRunner::new());
2465        let docker = Arc::new(FakeDocker::new());
2466        let environment = HashMap::new();
2467
2468        init_devcontainer_manifest(
2469            cx,
2470            fs,
2471            http_client,
2472            docker,
2473            command_runner,
2474            environment,
2475            devcontainer_contents,
2476        )
2477        .await
2478    }
2479
    /// Builds a `DevContainerManifest` against a fake project rooted at
    /// `TEST_PROJECT_PATH`, returning it together with the injected test
    /// doubles so callers can inspect them afterwards.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Seed the fake fs with .devcontainer/devcontainer.json.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // A local worktree store and project environment are required to build
        // the DevContainerContext the manifest is constructed with.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Clone the fakes out before handing ownership to the manifest.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2522
2523    #[gpui::test]
2524    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
2525        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2526            cx,
2527            r#"
2528// These are some external comments. serde_lenient should handle them
2529{
2530    // These are some internal comments
2531    "image": "image",
2532    "remoteUser": "root",
2533}
2534            "#,
2535        )
2536        .await
2537        .unwrap();
2538
2539        let mut metadata = HashMap::new();
2540        metadata.insert(
2541            "remoteUser".to_string(),
2542            serde_json_lenient::Value::String("vsCode".to_string()),
2543        );
2544        let given_docker_config = DockerInspect {
2545            id: "docker_id".to_string(),
2546            config: DockerInspectConfig {
2547                labels: DockerConfigLabels {
2548                    metadata: Some(vec![metadata]),
2549                },
2550                image_user: None,
2551                env: Vec::new(),
2552            },
2553            mounts: None,
2554            state: None,
2555        };
2556
2557        let remote_user =
2558            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();
2559
2560        assert_eq!(remote_user, "root".to_string())
2561    }
2562
    /// With no `remoteUser` in devcontainer.json, the user recorded in the
    /// image's devcontainer metadata label should be returned.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        // Empty devcontainer config: nothing overrides the image metadata.
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2591
2592    #[test]
2593    fn should_extract_feature_id_from_references() {
2594        assert_eq!(
2595            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2596            "aws-cli"
2597        );
2598        assert_eq!(
2599            extract_feature_id("ghcr.io/devcontainers/features/go"),
2600            "go"
2601        );
2602        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2603        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2604        assert_eq!(
2605            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2606            "rust"
2607        );
2608    }
2609
2610    #[gpui::test]
2611    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2612        let mut metadata = HashMap::new();
2613        metadata.insert(
2614            "remoteUser".to_string(),
2615            serde_json_lenient::Value::String("vsCode".to_string()),
2616        );
2617
2618        let (_, devcontainer_manifest) =
2619            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2620        let build_resources = DockerBuildResources {
2621            image: DockerInspect {
2622                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2623                config: DockerInspectConfig {
2624                    labels: DockerConfigLabels { metadata: None },
2625                    image_user: None,
2626                    env: Vec::new(),
2627                },
2628                mounts: None,
2629                state: None,
2630            },
2631            additional_mounts: vec![],
2632            privileged: false,
2633            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
2634        };
2635        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2636
2637        assert!(docker_run_command.is_ok());
2638        let docker_run_command = docker_run_command.expect("ok");
2639
2640        assert_eq!(docker_run_command.get_program(), "docker");
2641        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2642            .join(".devcontainer")
2643            .join("devcontainer.json");
2644        let expected_config_file_label = expected_config_file_label.display();
2645        assert_eq!(
2646            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2647            vec![
2648                OsStr::new("run"),
2649                OsStr::new("--sig-proxy=false"),
2650                OsStr::new("-d"),
2651                OsStr::new("--mount"),
2652                OsStr::new(
2653                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2654                ),
2655                OsStr::new("-l"),
2656                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2657                OsStr::new("-l"),
2658                OsStr::new(&format!(
2659                    "devcontainer.config_file={expected_config_file_label}"
2660                )),
2661                OsStr::new("--entrypoint"),
2662                OsStr::new("/bin/sh"),
2663                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2664                OsStr::new("-c"),
2665                OsStr::new(
2666                    "
2667    echo Container started
2668    trap \"exit 0\" 15
2669    exec \"$@\"
2670    while sleep 1 & wait $!; do :; done
2671                        "
2672                    .trim()
2673                ),
2674                OsStr::new("-"),
2675            ]
2676        )
2677    }
2678
2679    #[gpui::test]
2680    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
2681        // State where service not defined in dev container
2682        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2683        let given_docker_compose_config = DockerComposeResources {
2684            config: DockerComposeConfig {
2685                name: Some("devcontainers".to_string()),
2686                services: HashMap::new(),
2687                ..Default::default()
2688            },
2689            ..Default::default()
2690        };
2691
2692        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2693
2694        assert!(bad_result.is_err());
2695
2696        // State where service defined in devcontainer, not found in DockerCompose config
2697        let (_, given_dev_container) =
2698            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
2699                .await
2700                .unwrap();
2701        let given_docker_compose_config = DockerComposeResources {
2702            config: DockerComposeConfig {
2703                name: Some("devcontainers".to_string()),
2704                services: HashMap::new(),
2705                ..Default::default()
2706            },
2707            ..Default::default()
2708        };
2709
2710        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2711
2712        assert!(bad_result.is_err());
2713        // State where service defined in devcontainer and in DockerCompose config
2714
2715        let (_, given_dev_container) =
2716            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
2717                .await
2718                .unwrap();
2719        let given_docker_compose_config = DockerComposeResources {
2720            config: DockerComposeConfig {
2721                name: Some("devcontainers".to_string()),
2722                services: HashMap::from([(
2723                    "found_service".to_string(),
2724                    DockerComposeService {
2725                        ..Default::default()
2726                    },
2727                )]),
2728                ..Default::default()
2729            },
2730            ..Default::default()
2731        };
2732
2733        let (service_name, _) =
2734            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();
2735
2736        assert_eq!(service_name, "found_service".to_string());
2737    }
2738
    /// End-to-end check of non-remote variable substitution when the
    /// devcontainer uses the default workspace mount (`/workspaces/<project>`).
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
                    "#;
        // Supply a local environment so `${localEnv:...}` lookups resolve.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must have moved the config to the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2851
    /// End-to-end check of non-remote variable substitution when the
    /// devcontainer overrides `workspaceMount`/`workspaceFolder`, so the
    /// container-side folder variables come from the explicit values.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
                // These are some external comments. serde_lenient should handle them
                {
                    // These are some internal comments
                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
                    "name": "myDevContainer-${devcontainerId}",
                    "remoteUser": "root",
                    "remoteEnv": {
                        "DEVCONTAINER_ID": "${devcontainerId}",
                        "MYVAR2": "myvarothervalue",
                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

                    },
                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
                    "workspaceFolder": "/workspace/customfolder"
                }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must have moved the config to the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} -- from explicit workspaceFolder
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder} -- from explicit workspaceFolder
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2938
    // updateRemoteUserUID is treated as false on Windows, so this test would fail there.
    // The Windows behavior is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid.
2941    #[cfg(not(target_os = "windows"))]
2942    #[gpui::test]
2943    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2944        cx.executor().allow_parking();
2945        env_logger::try_init().ok();
2946        let given_devcontainer_contents = r#"
2947            /*---------------------------------------------------------------------------------------------
2948             *  Copyright (c) Microsoft Corporation. All rights reserved.
2949             *  Licensed under the MIT License. See License.txt in the project root for license information.
2950             *--------------------------------------------------------------------------------------------*/
2951            {
2952              "name": "cli-${devcontainerId}",
2953              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2954              "build": {
2955                "dockerfile": "Dockerfile",
2956                "args": {
2957                  "VARIANT": "18-bookworm",
2958                  "FOO": "bar",
2959                },
2960              },
2961              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2962              "workspaceFolder": "/workspace2",
2963              "mounts": [
2964                // Keep command history across instances
2965                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2966              ],
2967
2968              "forwardPorts": [
2969                8082,
2970                8083,
2971              ],
2972              "appPort": [
2973                8084,
2974                "8085:8086",
2975              ],
2976
2977              "containerEnv": {
2978                "VARIABLE_VALUE": "value",
2979              },
2980
2981              "initializeCommand": "touch IAM.md",
2982
2983              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
2984
2985              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
2986
2987              "postCreateCommand": {
2988                "yarn": "yarn install",
2989                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2990              },
2991
2992              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2993
2994              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
2995
2996              "remoteUser": "node",
2997
2998              "remoteEnv": {
2999                "PATH": "${containerEnv:PATH}:/some/other/path",
3000                "OTHER_ENV": "other_env_value"
3001              },
3002
3003              "features": {
3004                "ghcr.io/devcontainers/features/docker-in-docker:2": {
3005                  "moby": false,
3006                },
3007                "ghcr.io/devcontainers/features/go:1": {},
3008              },
3009
3010              "customizations": {
3011                "vscode": {
3012                  "extensions": [
3013                    "dbaeumer.vscode-eslint",
3014                    "GitHub.vscode-pull-request-github",
3015                  ],
3016                },
3017                "zed": {
3018                  "extensions": ["vue", "ruby"],
3019                },
3020                "codespaces": {
3021                  "repositories": {
3022                    "devcontainers/features": {
3023                      "permissions": {
3024                        "contents": "write",
3025                        "workflows": "write",
3026                      },
3027                    },
3028                  },
3029                },
3030              },
3031            }
3032            "#;
3033
3034        let (test_dependencies, mut devcontainer_manifest) =
3035            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3036                .await
3037                .unwrap();
3038
3039        test_dependencies
3040            .fs
3041            .atomic_write(
3042                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3043                r#"
3044#  Copyright (c) Microsoft Corporation. All rights reserved.
3045#  Licensed under the MIT License. See License.txt in the project root for license information.
3046ARG VARIANT="16-bullseye"
3047FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3048
3049RUN mkdir -p /workspaces && chown node:node /workspaces
3050
3051ARG USERNAME=node
3052USER $USERNAME
3053
3054# Save command line history
3055RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3056&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3057&& mkdir -p /home/$USERNAME/commandhistory \
3058&& touch /home/$USERNAME/commandhistory/.bash_history \
3059&& chown -R $USERNAME /home/$USERNAME/commandhistory
3060                    "#.trim().to_string(),
3061            )
3062            .await
3063            .unwrap();
3064
3065        devcontainer_manifest.parse_nonremote_vars().unwrap();
3066
3067        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3068
3069        assert_eq!(
3070            devcontainer_up.extension_ids,
3071            vec!["vue".to_string(), "ruby".to_string()]
3072        );
3073
3074        let files = test_dependencies.fs.files();
3075        let feature_dockerfile = files
3076            .iter()
3077            .find(|f| {
3078                f.file_name()
3079                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3080            })
3081            .expect("to be found");
3082        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3083        assert_eq!(
3084            &feature_dockerfile,
3085            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3086
3087#  Copyright (c) Microsoft Corporation. All rights reserved.
3088#  Licensed under the MIT License. See License.txt in the project root for license information.
3089ARG VARIANT="16-bullseye"
3090FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3091
3092RUN mkdir -p /workspaces && chown node:node /workspaces
3093
3094ARG USERNAME=node
3095USER $USERNAME
3096
3097# Save command line history
3098RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3099&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3100&& mkdir -p /home/$USERNAME/commandhistory \
3101&& touch /home/$USERNAME/commandhistory/.bash_history \
3102&& chown -R $USERNAME /home/$USERNAME/commandhistory
3103
3104FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3105USER root
3106COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3107RUN chmod -R 0755 /tmp/build-features/
3108
3109FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3110
3111USER root
3112
3113RUN mkdir -p /tmp/dev-container-features
3114COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3115
3116RUN \
3117echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3118echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3119
3120
3121RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3122cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3123&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3124&& cd /tmp/dev-container-features/docker-in-docker_0 \
3125&& chmod +x ./devcontainer-features-install.sh \
3126&& ./devcontainer-features-install.sh \
3127&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3128
3129RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3130cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3131&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3132&& cd /tmp/dev-container-features/go_1 \
3133&& chmod +x ./devcontainer-features-install.sh \
3134&& ./devcontainer-features-install.sh \
3135&& rm -rf /tmp/dev-container-features/go_1
3136
3137
3138ARG _DEV_CONTAINERS_IMAGE_USER=root
3139USER $_DEV_CONTAINERS_IMAGE_USER
3140"#
3141        );
3142
3143        let uid_dockerfile = files
3144            .iter()
3145            .find(|f| {
3146                f.file_name()
3147                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3148            })
3149            .expect("to be found");
3150        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3151
3152        assert_eq!(
3153            &uid_dockerfile,
3154            r#"ARG BASE_IMAGE
3155FROM $BASE_IMAGE
3156
3157USER root
3158
3159ARG REMOTE_USER
3160ARG NEW_UID
3161ARG NEW_GID
3162SHELL ["/bin/sh", "-c"]
3163RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3164	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3165	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3166	if [ -z "$OLD_UID" ]; then \
3167		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3168	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3169		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3170	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3171		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3172	else \
3173		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3174			FREE_GID=65532; \
3175			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3176			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3177			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3178		fi; \
3179		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3180		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3181		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3182			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3183		fi; \
3184		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3185	fi;
3186
3187ARG IMAGE_USER
3188USER $IMAGE_USER
3189
3190# Ensure that /etc/profile does not clobber the existing path
3191RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3192
3193ENV DOCKER_BUILDKIT=1
3194
3195ENV GOPATH=/go
3196ENV GOROOT=/usr/local/go
3197ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3198ENV VARIABLE_VALUE=value
3199"#
3200        );
3201
3202        let golang_install_wrapper = files
3203            .iter()
3204            .find(|f| {
3205                f.file_name()
3206                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3207                    && f.to_str().is_some_and(|s| s.contains("/go_"))
3208            })
3209            .expect("to be found");
3210        let golang_install_wrapper = test_dependencies
3211            .fs
3212            .load(golang_install_wrapper)
3213            .await
3214            .unwrap();
3215        assert_eq!(
3216            &golang_install_wrapper,
3217            r#"#!/bin/sh
3218set -e
3219
3220on_exit () {
3221    [ $? -eq 0 ] && exit
3222    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3223}
3224
3225trap on_exit EXIT
3226
3227echo ===========================================================================
3228echo 'Feature       : go'
3229echo 'Id            : ghcr.io/devcontainers/features/go:1'
3230echo 'Options       :'
3231echo '    GOLANGCILINTVERSION=latest
3232    VERSION=latest'
3233echo ===========================================================================
3234
3235set -a
3236. ../devcontainer-features.builtin.env
3237. ./devcontainer-features.env
3238set +a
3239
3240chmod +x ./install.sh
3241./install.sh
3242"#
3243        );
3244
3245        let docker_commands = test_dependencies
3246            .command_runner
3247            .commands_by_program("docker");
3248
3249        let docker_run_command = docker_commands
3250            .iter()
3251            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3252            .expect("found");
3253
3254        assert_eq!(
3255            docker_run_command.args,
3256            vec![
3257                "run".to_string(),
3258                "--privileged".to_string(),
3259                "--sig-proxy=false".to_string(),
3260                "-d".to_string(),
3261                "--mount".to_string(),
3262                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3263                "--mount".to_string(),
3264                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3265                "--mount".to_string(),
3266                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3267                "-l".to_string(),
3268                "devcontainer.local_folder=/path/to/local/project".to_string(),
3269                "-l".to_string(),
3270                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3271                "-l".to_string(),
3272                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3273                "-p".to_string(),
3274                "8082:8082".to_string(),
3275                "-p".to_string(),
3276                "8083:8083".to_string(),
3277                "-p".to_string(),
3278                "8084:8084".to_string(),
3279                "-p".to_string(),
3280                "8085:8086".to_string(),
3281                "--entrypoint".to_string(),
3282                "/bin/sh".to_string(),
3283                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3284                "-c".to_string(),
3285                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3286                "-".to_string()
3287            ]
3288        );
3289
3290        let docker_exec_commands = test_dependencies
3291            .docker
3292            .exec_commands_recorded
3293            .lock()
3294            .unwrap();
3295
3296        assert!(docker_exec_commands.iter().all(|exec| {
3297            exec.env
3298                == HashMap::from([
3299                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3300                    (
3301                        "PATH".to_string(),
3302                        "/initial/path:/some/other/path".to_string(),
3303                    ),
3304                ])
3305        }))
3306    }
3307
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end test of `build_and_run` for a docker-compose based devcontainer
        // with two OCI features (aws-cli, docker-in-docker). Verifies the build
        // artifacts written to the fake fs: the feature-extended Dockerfile, the
        // UID-remapping Dockerfile, and the compose build/runtime override files.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture (JSONC, trailing commas and // comments intact):
        // compose-based config targeting the "app" service, forwarding one bare port
        // (8083) and two "service:port" entries on the sibling "db" service.
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the docker-compose.yml the devcontainer.json points at: "app"
        // builds from the local Dockerfile and shares the network of "db"
        // (network_mode: service:db); "db" runs a stock postgres image.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The base Dockerfile referenced by the compose "app" service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variables (e.g. ${localWorkspaceFolderBasename}) before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full build-and-run pipeline; the result itself is not
        // asserted here — the generated on-disk artifacts are.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Dockerfile.extended: the base Dockerfile (FROM line relabelled with
        // "AS dev_container_auto_added_stage_label") followed by the generated
        // feature-install stages for aws-cli_0 and docker-in-docker_1.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateUID.Dockerfile: generated because updateRemoteUserUID is not
        // disabled in this fixture (contrast with the _no_update_uid sibling test).
        // NOTE: the RUN script's continuation lines are tab-indented — the
        // expected string must preserve those tabs exactly.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // Build override (docker_compose_build.json): must keep the original
        // build context (".") from docker-compose.yml for the "app" service.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // Runtime override (docker_compose_runtime.json): "app" gets the
        // keep-alive entrypoint, privileged mode (from docker-in-docker),
        // devcontainer labels, and the dind /var/lib/docker volume; all
        // forwarded ports are published on "db" (the "app" service shares its
        // network via `network_mode: service:db` in the compose fixture).
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3628
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        // Variant of test_spawns_devcontainer_with_docker_compose with
        // `"updateRemoteUserUID": false` (plus an `"appPort"`). Because this
        // matches the forced-false Windows behavior, this test is not
        // cfg-gated and serves as the Windows coverage for the sibling test.
        // Only Dockerfile.extended is asserted here; no updateUID.Dockerfile
        // assertion is made.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          "forwardPorts": [
            8083,
            "db:5432",
            "db:1234",
          ],
          "updateRemoteUserUID": false,
          "appPort": "8084",

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture referenced by the devcontainer.json above.
        // NOTE(review): unlike the sibling test's fixture, the keys under
        // `volumes:`/`services:` here sit at column 0 (e.g. `app:` is a
        // top-level key, not nested under `services:`) — the indentation looks
        // stripped. The test passes with only Dockerfile.extended asserted;
        // confirm whether this YAML shape is intentional.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
    build:
        context: .
        dockerfile: Dockerfile
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    volumes:
        - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
        - postgres-data:/var/lib/postgresql/data
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Base Dockerfile referenced by the compose "app" service's build section.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variables, then run the full build pipeline.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Dockerfile.extended: base Dockerfile plus the feature-install stages.
        // With updateRemoteUserUID disabled, the /etc/profile PATH fix and
        // `ENV DOCKER_BUILDKIT=1` are appended directly to this file (in the
        // sibling test those lines appear in updateUID.Dockerfile instead).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
3804
    /// End-to-end test of the docker-compose + features build path when the
    /// container engine is podman (`fake_docker.set_podman(true)`).
    ///
    /// Asserts two generated files:
    /// - `Dockerfile.extended`: feature installation uses plain `COPY --chown`
    ///   steps here, whereas the non-podman test below generates
    ///   `RUN --mount=type=bind` steps — presumably because podman's builder
    ///   lacks BuildKit bind mounts (TODO confirm). Note also that the podman
    ///   variant omits the trailing `/etc/profile` fixup and `DOCKER_BUILDKIT`
    ///   env seen in the docker variant.
    /// - `updateUID.Dockerfile`: the UID/GID remapping file is still produced.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json using dockerComposeFile + two OCI features.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          // "forwardPorts": [5432],

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Compose file whose `app` service builds from the local Dockerfile.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Base Dockerfile that the generated Dockerfile.extended must embed.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // The feature build writes a Dockerfile.extended somewhere in the fake
        // fs; locate it by file name.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Podman path: features are installed via `COPY --chown` stages rather
        // than `RUN --mount=type=bind` (compare with the docker-engine test).
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The UID-remapping Dockerfile must also be generated on this path.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4030
4031    #[gpui::test]
4032    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4033        cx.executor().allow_parking();
4034        env_logger::try_init().ok();
4035        let given_devcontainer_contents = r#"
4036            /*---------------------------------------------------------------------------------------------
4037             *  Copyright (c) Microsoft Corporation. All rights reserved.
4038             *  Licensed under the MIT License. See License.txt in the project root for license information.
4039             *--------------------------------------------------------------------------------------------*/
4040            {
4041              "name": "cli-${devcontainerId}",
4042              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4043              "build": {
4044                "dockerfile": "Dockerfile",
4045                "args": {
4046                  "VARIANT": "18-bookworm",
4047                  "FOO": "bar",
4048                },
4049              },
4050              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4051              "workspaceFolder": "/workspace2",
4052              "mounts": [
4053                // Keep command history across instances
4054                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4055              ],
4056
4057              "forwardPorts": [
4058                8082,
4059                8083,
4060              ],
4061              "appPort": "8084",
4062              "updateRemoteUserUID": false,
4063
4064              "containerEnv": {
4065                "VARIABLE_VALUE": "value",
4066              },
4067
4068              "initializeCommand": "touch IAM.md",
4069
4070              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4071
4072              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4073
4074              "postCreateCommand": {
4075                "yarn": "yarn install",
4076                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4077              },
4078
4079              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4080
4081              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4082
4083              "remoteUser": "node",
4084
4085              "remoteEnv": {
4086                "PATH": "${containerEnv:PATH}:/some/other/path",
4087                "OTHER_ENV": "other_env_value"
4088              },
4089
4090              "features": {
4091                "ghcr.io/devcontainers/features/docker-in-docker:2": {
4092                  "moby": false,
4093                },
4094                "ghcr.io/devcontainers/features/go:1": {},
4095              },
4096
4097              "customizations": {
4098                "vscode": {
4099                  "extensions": [
4100                    "dbaeumer.vscode-eslint",
4101                    "GitHub.vscode-pull-request-github",
4102                  ],
4103                },
4104                "zed": {
4105                  "extensions": ["vue", "ruby"],
4106                },
4107                "codespaces": {
4108                  "repositories": {
4109                    "devcontainers/features": {
4110                      "permissions": {
4111                        "contents": "write",
4112                        "workflows": "write",
4113                      },
4114                    },
4115                  },
4116                },
4117              },
4118            }
4119            "#;
4120
4121        let (test_dependencies, mut devcontainer_manifest) =
4122            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4123                .await
4124                .unwrap();
4125
4126        test_dependencies
4127            .fs
4128            .atomic_write(
4129                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4130                r#"
4131#  Copyright (c) Microsoft Corporation. All rights reserved.
4132#  Licensed under the MIT License. See License.txt in the project root for license information.
4133ARG VARIANT="16-bullseye"
4134FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
4135
4136RUN mkdir -p /workspaces && chown node:node /workspaces
4137
4138ARG USERNAME=node
4139USER $USERNAME
4140
4141# Save command line history
4142RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4143&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4144&& mkdir -p /home/$USERNAME/commandhistory \
4145&& touch /home/$USERNAME/commandhistory/.bash_history \
4146&& chown -R $USERNAME /home/$USERNAME/commandhistory
4147                    "#.trim().to_string(),
4148            )
4149            .await
4150            .unwrap();
4151
4152        devcontainer_manifest.parse_nonremote_vars().unwrap();
4153
4154        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4155
4156        assert_eq!(
4157            devcontainer_up.extension_ids,
4158            vec!["vue".to_string(), "ruby".to_string()]
4159        );
4160
4161        let files = test_dependencies.fs.files();
4162        let feature_dockerfile = files
4163            .iter()
4164            .find(|f| {
4165                f.file_name()
4166                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4167            })
4168            .expect("to be found");
4169        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4170        assert_eq!(
4171            &feature_dockerfile,
4172            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4173
4174#  Copyright (c) Microsoft Corporation. All rights reserved.
4175#  Licensed under the MIT License. See License.txt in the project root for license information.
4176ARG VARIANT="16-bullseye"
4177FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4178
4179RUN mkdir -p /workspaces && chown node:node /workspaces
4180
4181ARG USERNAME=node
4182USER $USERNAME
4183
4184# Save command line history
4185RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4186&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4187&& mkdir -p /home/$USERNAME/commandhistory \
4188&& touch /home/$USERNAME/commandhistory/.bash_history \
4189&& chown -R $USERNAME /home/$USERNAME/commandhistory
4190
4191FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4192USER root
4193COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4194RUN chmod -R 0755 /tmp/build-features/
4195
4196FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4197
4198USER root
4199
4200RUN mkdir -p /tmp/dev-container-features
4201COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4202
4203RUN \
4204echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4205echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4206
4207
4208RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4209cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4210&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4211&& cd /tmp/dev-container-features/docker-in-docker_0 \
4212&& chmod +x ./devcontainer-features-install.sh \
4213&& ./devcontainer-features-install.sh \
4214&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4215
4216RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4217cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4218&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4219&& cd /tmp/dev-container-features/go_1 \
4220&& chmod +x ./devcontainer-features-install.sh \
4221&& ./devcontainer-features-install.sh \
4222&& rm -rf /tmp/dev-container-features/go_1
4223
4224
4225ARG _DEV_CONTAINERS_IMAGE_USER=root
4226USER $_DEV_CONTAINERS_IMAGE_USER
4227
4228# Ensure that /etc/profile does not clobber the existing path
4229RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4230
4231ENV DOCKER_BUILDKIT=1
4232
4233ENV GOPATH=/go
4234ENV GOROOT=/usr/local/go
4235ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4236ENV VARIABLE_VALUE=value
4237"#
4238        );
4239
4240        let golang_install_wrapper = files
4241            .iter()
4242            .find(|f| {
4243                f.file_name()
4244                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4245                    && f.to_str().is_some_and(|s| s.contains("go_"))
4246            })
4247            .expect("to be found");
4248        let golang_install_wrapper = test_dependencies
4249            .fs
4250            .load(golang_install_wrapper)
4251            .await
4252            .unwrap();
4253        assert_eq!(
4254            &golang_install_wrapper,
4255            r#"#!/bin/sh
4256set -e
4257
4258on_exit () {
4259    [ $? -eq 0 ] && exit
4260    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4261}
4262
4263trap on_exit EXIT
4264
4265echo ===========================================================================
4266echo 'Feature       : go'
4267echo 'Id            : ghcr.io/devcontainers/features/go:1'
4268echo 'Options       :'
4269echo '    GOLANGCILINTVERSION=latest
4270    VERSION=latest'
4271echo ===========================================================================
4272
4273set -a
4274. ../devcontainer-features.builtin.env
4275. ./devcontainer-features.env
4276set +a
4277
4278chmod +x ./install.sh
4279./install.sh
4280"#
4281        );
4282
4283        let docker_commands = test_dependencies
4284            .command_runner
4285            .commands_by_program("docker");
4286
4287        let docker_run_command = docker_commands
4288            .iter()
4289            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4290
4291        assert!(docker_run_command.is_some());
4292
4293        let docker_exec_commands = test_dependencies
4294            .docker
4295            .exec_commands_recorded
4296            .lock()
4297            .unwrap();
4298
4299        assert!(docker_exec_commands.iter().all(|exec| {
4300            exec.env
4301                == HashMap::from([
4302                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4303                    (
4304                        "PATH".to_string(),
4305                        "/initial/path:/some/other/path".to_string(),
4306                    ),
4307                ])
4308        }))
4309    }
4310
    /// A minimal `"image"`-only config (no Dockerfile, no features) must still
    /// generate `updateUID.Dockerfile`, whose contents remap the remote user's
    /// UID/GID via the `REMOTE_USER`/`NEW_UID`/`NEW_GID` build args.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "image": "test_image:latest",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Locate the generated UID-remapping Dockerfile in the fake fs.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Same expected contents as the docker-compose + plain-image test.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4385
    /// Like `test_spawns_devcontainer_with_plain_image`, but the plain image
    /// comes from a docker-compose service (`image:` key, no `build:` section):
    /// the same `updateUID.Dockerfile` must still be generated.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "dockerComposeFile": "docker-compose-plain.yml",
              "service": "app",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose file whose `app` service uses a prebuilt image only.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
    app:
        image: test_image:latest
        command: sleep infinity
        volumes:
            - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Locate the generated UID-remapping Dockerfile in the fake fs.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Same expected contents as the plain-image (non-compose) test.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4479
4480    pub(crate) struct RecordedExecCommand {
4481        pub(crate) _container_id: String,
4482        pub(crate) _remote_folder: String,
4483        pub(crate) _user: String,
4484        pub(crate) env: HashMap<String, String>,
4485        pub(crate) _inner_command: Command,
4486    }
4487
4488    pub(crate) struct FakeDocker {
4489        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
4490        podman: bool,
4491    }
4492
4493    impl FakeDocker {
4494        pub(crate) fn new() -> Self {
4495            Self {
4496                podman: false,
4497                exec_commands_recorded: Mutex::new(Vec::new()),
4498            }
4499        }
4500        #[cfg(not(target_os = "windows"))]
4501        fn set_podman(&mut self, podman: bool) {
4502            self.podman = podman;
4503        }
4504    }
4505
4506    #[async_trait]
4507    impl DockerClient for FakeDocker {
4508        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
4509            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
4510                return Ok(DockerInspect {
4511                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4512                        .to_string(),
4513                    config: DockerInspectConfig {
4514                        labels: DockerConfigLabels {
4515                            metadata: Some(vec![HashMap::from([(
4516                                "remoteUser".to_string(),
4517                                Value::String("node".to_string()),
4518                            )])]),
4519                        },
4520                        env: Vec::new(),
4521                        image_user: Some("root".to_string()),
4522                    },
4523                    mounts: None,
4524                    state: None,
4525                });
4526            }
4527            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
4528                return Ok(DockerInspect {
4529                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
4530                        .to_string(),
4531                    config: DockerInspectConfig {
4532                        labels: DockerConfigLabels {
4533                            metadata: Some(vec![HashMap::from([(
4534                                "remoteUser".to_string(),
4535                                Value::String("vscode".to_string()),
4536                            )])]),
4537                        },
4538                        image_user: Some("root".to_string()),
4539                        env: Vec::new(),
4540                    },
4541                    mounts: None,
4542                    state: None,
4543                });
4544            }
4545            if id.starts_with("cli_") {
4546                return Ok(DockerInspect {
4547                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4548                        .to_string(),
4549                    config: DockerInspectConfig {
4550                        labels: DockerConfigLabels {
4551                            metadata: Some(vec![HashMap::from([(
4552                                "remoteUser".to_string(),
4553                                Value::String("node".to_string()),
4554                            )])]),
4555                        },
4556                        image_user: Some("root".to_string()),
4557                        env: vec!["PATH=/initial/path".to_string()],
4558                    },
4559                    mounts: None,
4560                    state: None,
4561                });
4562            }
4563            if id == "found_docker_ps" {
4564                return Ok(DockerInspect {
4565                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4566                        .to_string(),
4567                    config: DockerInspectConfig {
4568                        labels: DockerConfigLabels {
4569                            metadata: Some(vec![HashMap::from([(
4570                                "remoteUser".to_string(),
4571                                Value::String("node".to_string()),
4572                            )])]),
4573                        },
4574                        image_user: Some("root".to_string()),
4575                        env: vec!["PATH=/initial/path".to_string()],
4576                    },
4577                    mounts: Some(vec![DockerInspectMount {
4578                        source: "/path/to/local/project".to_string(),
4579                        destination: "/workspaces/project".to_string(),
4580                    }]),
4581                    state: None,
4582                });
4583            }
4584            if id.starts_with("rust_a-") {
4585                return Ok(DockerInspect {
4586                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
4587                        .to_string(),
4588                    config: DockerInspectConfig {
4589                        labels: DockerConfigLabels {
4590                            metadata: Some(vec![HashMap::from([(
4591                                "remoteUser".to_string(),
4592                                Value::String("vscode".to_string()),
4593                            )])]),
4594                        },
4595                        image_user: Some("root".to_string()),
4596                        env: Vec::new(),
4597                    },
4598                    mounts: None,
4599                    state: None,
4600                });
4601            }
4602            if id == "test_image:latest" {
4603                return Ok(DockerInspect {
4604                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4605                        .to_string(),
4606                    config: DockerInspectConfig {
4607                        labels: DockerConfigLabels {
4608                            metadata: Some(vec![HashMap::from([(
4609                                "remoteUser".to_string(),
4610                                Value::String("node".to_string()),
4611                            )])]),
4612                        },
4613                        env: Vec::new(),
4614                        image_user: Some("root".to_string()),
4615                    },
4616                    mounts: None,
4617                    state: None,
4618                });
4619            }
4620
4621            Err(DevContainerError::DockerNotAvailable)
4622        }
4623        async fn get_docker_compose_config(
4624            &self,
4625            config_files: &Vec<PathBuf>,
4626        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
4627            if config_files.len() == 1
4628                && config_files.get(0)
4629                    == Some(&PathBuf::from(
4630                        "/path/to/local/project/.devcontainer/docker-compose.yml",
4631                    ))
4632            {
4633                return Ok(Some(DockerComposeConfig {
4634                    name: None,
4635                    services: HashMap::from([
4636                        (
4637                            "app".to_string(),
4638                            DockerComposeService {
4639                                build: Some(DockerComposeServiceBuild {
4640                                    context: Some(".".to_string()),
4641                                    dockerfile: Some("Dockerfile".to_string()),
4642                                    args: None,
4643                                    additional_contexts: None,
4644                                }),
4645                                volumes: vec![MountDefinition {
4646                                    source: Some("../..".to_string()),
4647                                    target: "/workspaces".to_string(),
4648                                    mount_type: Some("bind".to_string()),
4649                                }],
4650                                network_mode: Some("service:db".to_string()),
4651                                ..Default::default()
4652                            },
4653                        ),
4654                        (
4655                            "db".to_string(),
4656                            DockerComposeService {
4657                                image: Some("postgres:14.1".to_string()),
4658                                volumes: vec![MountDefinition {
4659                                    source: Some("postgres-data".to_string()),
4660                                    target: "/var/lib/postgresql/data".to_string(),
4661                                    mount_type: Some("volume".to_string()),
4662                                }],
4663                                env_file: Some(vec![".env".to_string()]),
4664                                ..Default::default()
4665                            },
4666                        ),
4667                    ]),
4668                    volumes: HashMap::from([(
4669                        "postgres-data".to_string(),
4670                        DockerComposeVolume::default(),
4671                    )]),
4672                }));
4673            }
4674            if config_files.len() == 1
4675                && config_files.get(0)
4676                    == Some(&PathBuf::from(
4677                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
4678                    ))
4679            {
4680                return Ok(Some(DockerComposeConfig {
4681                    name: None,
4682                    services: HashMap::from([(
4683                        "app".to_string(),
4684                        DockerComposeService {
4685                            image: Some("test_image:latest".to_string()),
4686                            command: vec!["sleep".to_string(), "infinity".to_string()],
4687                            ..Default::default()
4688                        },
4689                    )]),
4690                    ..Default::default()
4691                }));
4692            }
4693            Err(DevContainerError::DockerNotAvailable)
4694        }
        /// Test stub: pretend `docker compose build` always succeeds without
        /// doing any work; arguments are intentionally ignored.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
4702        async fn run_docker_exec(
4703            &self,
4704            container_id: &str,
4705            remote_folder: &str,
4706            user: &str,
4707            env: &HashMap<String, String>,
4708            inner_command: Command,
4709        ) -> Result<(), DevContainerError> {
4710            let mut record = self
4711                .exec_commands_recorded
4712                .lock()
4713                .expect("should be available");
4714            record.push(RecordedExecCommand {
4715                _container_id: container_id.to_string(),
4716                _remote_folder: remote_folder.to_string(),
4717                _user: user.to_string(),
4718                env: env.clone(),
4719                _inner_command: inner_command,
4720            });
4721            Ok(())
4722        }
        /// Test stub: this fake client never starts containers and always
        /// reports Docker as unavailable.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
4726        async fn find_process_by_filters(
4727            &self,
4728            _filters: Vec<String>,
4729        ) -> Result<Option<DockerPs>, DevContainerError> {
4730            Ok(Some(DockerPs {
4731                id: "found_docker_ps".to_string(),
4732            }))
4733        }
        /// BuildKit support is reported unless the fake is in podman mode.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
4737        fn docker_cli(&self) -> String {
4738            if self.podman {
4739                "podman".to_string()
4740            } else {
4741                "docker".to_string()
4742            }
4743        }
4744    }
4745
    /// One command captured by [`TestCommandRunner`], split into the program
    /// name and its argument list for easy assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        // Executable name, e.g. "docker".
        pub(crate) program: String,
        // Arguments passed to the program, in order.
        pub(crate) args: Vec<String>,
    }
4751
    /// A `CommandRunner` test double that records every command it is asked to
    /// run instead of executing it.
    pub(crate) struct TestCommandRunner {
        // Mutex gives interior mutability so recording works through `&self`.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4755
4756    impl TestCommandRunner {
4757        fn new() -> Self {
4758            Self {
4759                commands_recorded: Mutex::new(Vec::new()),
4760            }
4761        }
4762
4763        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4764            let record = self.commands_recorded.lock().expect("poisoned");
4765            record
4766                .iter()
4767                .filter(|r| r.program == program)
4768                .map(|r| r.clone())
4769                .collect()
4770        }
4771    }
4772
4773    #[async_trait]
4774    impl CommandRunner for TestCommandRunner {
4775        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4776            let mut record = self.commands_recorded.lock().expect("poisoned");
4777
4778            record.push(TestCommand {
4779                program: command.get_program().display().to_string(),
4780                args: command
4781                    .get_args()
4782                    .map(|a| a.display().to_string())
4783                    .collect(),
4784            });
4785
4786            Ok(Output {
4787                status: ExitStatus::default(),
4788                stdout: vec![],
4789                stderr: vec![],
4790            })
4791        }
4792    }
4793
4794    fn fake_http_client() -> Arc<dyn HttpClient> {
4795        FakeHttpClient::create(|request| async move {
4796            let (parts, _body) = request.into_parts();
4797            if parts.uri.path() == "/token" {
4798                let token_response = TokenResponse {
4799                    token: "token".to_string(),
4800                };
4801                return Ok(http::Response::builder()
4802                    .status(200)
4803                    .body(http_client::AsyncBody::from(
4804                        serde_json_lenient::to_string(&token_response).unwrap(),
4805                    ))
4806                    .unwrap());
4807            }
4808
4809            // OCI specific things
4810            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4811                let response = r#"
4812                    {
4813                        "schemaVersion": 2,
4814                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
4815                        "config": {
4816                            "mediaType": "application/vnd.devcontainers",
4817                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4818                            "size": 2
4819                        },
4820                        "layers": [
4821                            {
4822                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4823                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4824                                "size": 59392,
4825                                "annotations": {
4826                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4827                                }
4828                            }
4829                        ],
4830                        "annotations": {
4831                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4832                            "com.github.package.type": "devcontainer_feature"
4833                        }
4834                    }
4835                    "#;
4836                return Ok(http::Response::builder()
4837                    .status(200)
4838                    .body(http_client::AsyncBody::from(response))
4839                    .unwrap());
4840            }
4841
4842            if parts.uri.path()
4843                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4844            {
4845                let response = build_tarball(vec![
4846                    ("./NOTES.md", r#"
4847                        ## Limitations
4848
4849                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4850                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4851                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4852                          ```
4853                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4854                          ```
4855                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4856
4857
4858                        ## OS Support
4859
4860                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4861
4862                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4863
4864                        `bash` is required to execute the `install.sh` script."#),
4865                    ("./README.md", r#"
4866                        # Docker (Docker-in-Docker) (docker-in-docker)
4867
4868                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4869
4870                        ## Example Usage
4871
4872                        ```json
4873                        "features": {
4874                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4875                        }
4876                        ```
4877
4878                        ## Options
4879
4880                        | Options Id | Description | Type | Default Value |
4881                        |-----|-----|-----|-----|
4882                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4883                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4884                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4885                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4886                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4887                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4888                        | installDockerBuildx | Install Docker Buildx | boolean | true |
4889                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4890                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4891
4892                        ## Customizations
4893
4894                        ### VS Code Extensions
4895
4896                        - `ms-azuretools.vscode-containers`
4897
4898                        ## Limitations
4899
4900                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4901                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4902                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4903                          ```
4904                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4905                          ```
4906                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4907
4908
4909                        ## OS Support
4910
4911                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4912
4913                        `bash` is required to execute the `install.sh` script.
4914
4915
4916                        ---
4917
4918                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
4919                    ("./devcontainer-feature.json", r#"
4920                        {
4921                          "id": "docker-in-docker",
4922                          "version": "2.16.1",
4923                          "name": "Docker (Docker-in-Docker)",
4924                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4925                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4926                          "options": {
4927                            "version": {
4928                              "type": "string",
4929                              "proposals": [
4930                                "latest",
4931                                "none",
4932                                "20.10"
4933                              ],
4934                              "default": "latest",
4935                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4936                            },
4937                            "moby": {
4938                              "type": "boolean",
4939                              "default": true,
4940                              "description": "Install OSS Moby build instead of Docker CE"
4941                            },
4942                            "mobyBuildxVersion": {
4943                              "type": "string",
4944                              "default": "latest",
4945                              "description": "Install a specific version of moby-buildx when using Moby"
4946                            },
4947                            "dockerDashComposeVersion": {
4948                              "type": "string",
4949                              "enum": [
4950                                "none",
4951                                "v1",
4952                                "v2"
4953                              ],
4954                              "default": "v2",
4955                              "description": "Default version of Docker Compose (v1, v2 or none)"
4956                            },
4957                            "azureDnsAutoDetection": {
4958                              "type": "boolean",
4959                              "default": true,
4960                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4961                            },
4962                            "dockerDefaultAddressPool": {
4963                              "type": "string",
4964                              "default": "",
4965                              "proposals": [],
4966                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4967                            },
4968                            "installDockerBuildx": {
4969                              "type": "boolean",
4970                              "default": true,
4971                              "description": "Install Docker Buildx"
4972                            },
4973                            "installDockerComposeSwitch": {
4974                              "type": "boolean",
4975                              "default": false,
4976                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4977                            },
4978                            "disableIp6tables": {
4979                              "type": "boolean",
4980                              "default": false,
4981                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4982                            }
4983                          },
4984                          "entrypoint": "/usr/local/share/docker-init.sh",
4985                          "privileged": true,
4986                          "containerEnv": {
4987                            "DOCKER_BUILDKIT": "1"
4988                          },
4989                          "customizations": {
4990                            "vscode": {
4991                              "extensions": [
4992                                "ms-azuretools.vscode-containers"
4993                              ],
4994                              "settings": {
4995                                "github.copilot.chat.codeGeneration.instructions": [
4996                                  {
4997                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
4998                                  }
4999                                ]
5000                              }
5001                            }
5002                          },
5003                          "mounts": [
5004                            {
5005                              "source": "dind-var-lib-docker-${devcontainerId}",
5006                              "target": "/var/lib/docker",
5007                              "type": "volume"
5008                            }
5009                          ],
5010                          "installsAfter": [
5011                            "ghcr.io/devcontainers/features/common-utils"
5012                          ]
5013                        }"#),
5014                    ("./install.sh", r#"
5015                    #!/usr/bin/env bash
5016                    #-------------------------------------------------------------------------------------------------------------
5017                    # Copyright (c) Microsoft Corporation. All rights reserved.
5018                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5019                    #-------------------------------------------------------------------------------------------------------------
5020                    #
5021                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5022                    # Maintainer: The Dev Container spec maintainers
5023
5024
5025                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5026                    USE_MOBY="${MOBY:-"true"}"
5027                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5028                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5029                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5030                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5031                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5032                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5033                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5034                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5035                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5036                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5037                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5038                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5039
5040                    # Default: Exit on any failure.
5041                    set -e
5042
5043                    # Clean up
5044                    rm -rf /var/lib/apt/lists/*
5045
5046                    # Setup STDERR.
5047                    err() {
5048                        echo "(!) $*" >&2
5049                    }
5050
5051                    if [ "$(id -u)" -ne 0 ]; then
5052                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5053                        exit 1
5054                    fi
5055
5056                    ###################
5057                    # Helper Functions
5058                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5059                    ###################
5060
5061                    # Determine the appropriate non-root user
5062                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5063                        USERNAME=""
5064                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5065                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5066                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5067                                USERNAME=${CURRENT_USER}
5068                                break
5069                            fi
5070                        done
5071                        if [ "${USERNAME}" = "" ]; then
5072                            USERNAME=root
5073                        fi
5074                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5075                        USERNAME=root
5076                    fi
5077
                    # Refresh the package index, but only when the local cache looks empty,
                    # so repeated calls do not re-run a slow update.
                    pkg_mgr_update() {
                        case ${ADJUSTED_ID} in
                            debian)
                                # An empty /var/lib/apt/lists means apt-get update has not run yet
                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                                    echo "Running apt-get update..."
                                    apt-get update -y
                                fi
                                ;;
                            rhel)
                                # NOTE(review): microdnf is checked against /var/cache/yum -- confirm
                                # that is the cache path microdnf actually uses on these images.
                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                                    cache_check_dir="/var/cache/yum"
                                else
                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
                                fi
                                # Empty cache dir -> rebuild metadata cache
                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
                                    echo "Running ${PKG_MGR_CMD} makecache ..."
                                    ${PKG_MGR_CMD} makecache
                                fi
                                ;;
                        esac
                    }
5100
5101                    # Checks if packages are installed and installs them if not
5102                    check_packages() {
5103                        case ${ADJUSTED_ID} in
5104                            debian)
5105                                if ! dpkg -s "$@" > /dev/null 2>&1; then
5106                                    pkg_mgr_update
5107                                    apt-get -y install --no-install-recommends "$@"
5108                                fi
5109                                ;;
5110                            rhel)
5111                                if ! rpm -q "$@" > /dev/null 2>&1; then
5112                                    pkg_mgr_update
5113                                    ${PKG_MGR_CMD} -y install "$@"
5114                                fi
5115                                ;;
5116                        esac
5117                    }
5118
                    # Resolve ${!variable_name} to a concrete version using the repository's
                    # git tags. Handles "latest"/"current"/"lts" and partial versions
                    # (e.g. "1.2" -> newest "1.2.x"); exits 1 when nothing matches.
                    # $1 = name of the variable holding (and receiving) the version
                    # $2 = git repository URL
                    # $3 = tag prefix (default "tags/v"), $4 = part separator (default "."),
                    # $5 = "true" when the last version part may be omitted (e.g. go)
                    find_version_from_git_tags() {
                        local variable_name=$1
                        local requested_version=${!variable_name}
                        if [ "${requested_version}" = "none" ]; then return; fi
                        local repository=$2
                        local prefix=${3:-"tags/v"}
                        local separator=${4:-"."}
                        local last_part_optional=${5:-"false"}
                        # NOTE(review): grep -o "." matches ANY character (unescaped dot), so
                        # this compares the string LENGTH to 2 rather than counting dots. The
                        # branch is therefore taken for nearly every input, which is
                        # load-bearing: the validation below greps ${version_list}, and that
                        # list is only populated inside this branch.
                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                            local escaped_separator=${separator//./\\.}
                            local last_part
                            if [ "${last_part_optional}" = "true" ]; then
                                last_part="(${escaped_separator}[0-9]+)?"
                            else
                                last_part="${escaped_separator}[0-9]+"
                            fi
                            # \K is PCRE (hence grep -P below): drops the prefix from the match
                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                            else
                                set +e
                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                                set -e
                            fi
                        fi
                        # Fail when the resolved value is empty or absent from the tag list
                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                            exit 1
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }
5152
                    # Use semver logic to decrement ${!variable_name} by one step (major,
                    # minor, or break-fix, whichever applies), then resolve the result to the
                    # closest matching tag via find_version_from_git_tags.
                    find_prev_version_from_git_tags() {
                        local variable_name=$1
                        local current_version=${!variable_name}
                        local repository=$2
                        # Normally a "v" is used before the version number, but support alternate cases
                        local prefix=${3:-"tags/v"}
                        # Some repositories use "_" instead of "." for version number part separation, support that
                        local separator=${4:-"."}
                        # Some tools release versions that omit the last digit (e.g. go)
                        local last_part_optional=${5:-"false"}
                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
                        # NOTE(review): $6 is captured but never used in this function body.
                        local version_suffix_regex=$6
                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                        set +e
                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"

                            # x.0.0 -> previous major; x.y.0 / x.y -> previous minor;
                            # otherwise just decrement the break-fix part locally.
                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
                                ((major=major-1))
                                declare -g ${variable_name}="${major}"
                                # Look for latest version from previous major release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                declare -g ${variable_name}="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            else
                                ((breakfix=breakfix-1))
                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
                                    declare -g ${variable_name}="${major}.${minor}"
                                else
                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
                                fi
                            fi
                        set -e
                    }
5193
                    # Fetch the release published just before the latest one.
                    # $1 = repository URL (used for the git-tags fallback)
                    # $2 = GitHub API releases URL
                    # $3 = name of the variable to read the current value from and update.
                    # Falls back to git tags when the API reports rate limiting.
                    get_previous_version() {
                        local url=$1
                        local repo_url=$2
                        local variable_name=$3
                        prev_version=${!variable_name}

                        output=$(curl -s "$repo_url");
                        # An object response is an API error payload; an array is the release list
                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
                          message=$(echo "$output" | jq -r '.message')

                          if [[ $message == "API rate limit exceeded"* ]]; then
                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
                                echo -e "\nAttempting to find latest version using GitHub tags."
                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
                                declare -g ${variable_name}="${prev_version}"
                           fi
                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
                            echo -e "\nAttempting to find latest version using GitHub Api."
                            # .[1] is the second-newest release; strip a leading "v" below
                            version=$(echo "$output" | jq -r '.[1].tag_name')
                            declare -g ${variable_name}="${version#v}"
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }
5218
5219                    get_github_api_repo_url() {
5220                        local url=$1
5221                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5222                    }
5223
5224                    ###########################################
5225                    # Start docker-in-docker installation
5226                    ###########################################
5227
5228                    # Ensure apt is in non-interactive to avoid prompts
5229                    export DEBIAN_FRONTEND=noninteractive
5230
5231                    # Source /etc/os-release to get OS info
5232                    . /etc/os-release
5233
5234                    # Determine adjusted ID and package manager
5235                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5236                        ADJUSTED_ID="debian"
5237                        PKG_MGR_CMD="apt-get"
5238                        # Use dpkg for Debian-based systems
5239                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5240                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5241                        ADJUSTED_ID="rhel"
5242                        # Determine the appropriate package manager for RHEL-based systems
5243                        for pkg_mgr in tdnf dnf microdnf yum; do
5244                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
5245                                PKG_MGR_CMD="$pkg_mgr"
5246                                break
5247                            fi
5248                        done
5249
5250                        if [ -z "${PKG_MGR_CMD}" ]; then
5251                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5252                            exit 1
5253                        fi
5254
5255                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5256                    else
5257                        err "Linux distro ${ID} not supported."
5258                        exit 1
5259                    fi
5260
                    # Azure Linux specific setup: synthesize a codename from the version id
                    if [ "${ID}" = "azurelinux" ]; then
                        VERSION_CODENAME="azurelinux${VERSION_ID}"
                    fi

                    # Prevent attempting to install Moby on Debian trixie (packages removed)
                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
                        exit 1
                    fi

                    # Check if distro is supported for the chosen engine (Moby vs Docker CE)
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
                            else
                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
                            fi
                        fi
                    else
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then

                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
                        fi
                    fi

                    # Install base dependencies
                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
                    case ${ADJUSTED_ID} in
                        debian)
                            check_packages apt-transport-https $base_packages dirmngr
                            ;;
                        rhel)
                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel

                            ;;
                    esac

                    # Install git if not already present
                    if ! command -v git >/dev/null 2>&1; then
                        check_packages git
                    fi

                    # Update CA certificates to ensure HTTPS connections work properly
                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
                        update-ca-certificates
                    fi

                    # Swap to legacy iptables for compatibility (Debian only)
                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
                        update-alternatives --set iptables /usr/sbin/iptables-legacy
                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
                    fi
5332
                    # Set up the necessary package repositories (Microsoft for Moby,
                    # download.docker.com for Docker CE)
                    if [ "${USE_MOBY}" = "true" ]; then
                        # Name of open source engine/cli
                        engine_package_name="moby-engine"
                        cli_package_name="moby-cli"

                        case ${ADJUSTED_ID} in
                            debian)
                                # Import key safely and import Microsoft apt repo
                                {
                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
                                ;;
                            rhel)
                                echo "(*) ${ID} detected - checking for Moby packages..."

                                # Check if moby packages are available in default repos
                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                    echo "(*) Using built-in ${ID} Moby packages"
                                else
                                    case "${ID}" in
                                        azurelinux)
                                            # Azure Linux ships no Moby packages at all - hard stop
                                            echo "(*) Moby packages not found in Azure Linux repositories"
                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                            exit 1
                                            ;;
                                        mariner)
                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
                                            # Add Microsoft repository if packages aren't available locally
                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
                    [microsoft]
                    name=Microsoft Repository
                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
                    EOF
                                    # Verify packages are available after adding repo
                                    pkg_mgr_update
                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                        echo "(*) Moby packages not found in Microsoft repository either"
                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                        exit 1
                                    fi
                                    ;;
                                *)
                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                                    exit 1
                                    ;;
                                esac
                            fi
                            ;;
                        esac
                    else
                        # Name of licensed engine/cli
                        engine_package_name="docker-ce"
                        cli_package_name="docker-ce-cli"
                        case ${ADJUSTED_ID} in
                            debian)
                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
                                ;;
                            rhel)
                                # Docker CE repository setup for RHEL-based systems
                                setup_docker_ce_repo() {
                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                                    # \$ keeps $basearch literal so dnf/yum expands it, not the shell
                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
                    [docker-ce-stable]
                    name=Docker CE Stable
                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
                    skip_if_unavailable=1
                    module_hotfixes=1
                    EOF
                                }
                                install_azure_linux_deps() {
                                    echo "(*) Installing device-mapper libraries for Docker CE..."
                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                                    echo "(*) Installing additional Docker CE dependencies..."
                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                                        echo "(*) Some optional dependencies could not be installed, continuing..."
                                    }
                                }
                                setup_selinux_context() {
                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                                    fi
                                }

                                # Special handling for RHEL Docker CE installation
                                case "${ID}" in
                                    azurelinux|mariner)
                                        echo "(*) ${ID} detected"
                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                                        echo "(*) Setting up Docker CE repository..."

                                        setup_docker_ce_repo
                                        install_azure_linux_deps

                                        if [ "${USE_MOBY}" != "true" ]; then
                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                                            setup_selinux_context
                                        else
                                            echo "(*) Using Moby - container-selinux not required"
                                        fi
                                        ;;
                                    *)
                                        # Standard RHEL/CentOS/Fedora approach
                                        if command -v dnf >/dev/null 2>&1; then
                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        elif command -v yum-config-manager >/dev/null 2>&1; then
                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        else
                                            # Manual fallback
                                            setup_docker_ce_repo
                                fi
                                ;;
                            esac
                            ;;
                        esac
                    fi
5465
                    # Refresh package database
                    case ${ADJUSTED_ID} in
                        debian)
                            apt-get update
                            ;;
                        rhel)
                            pkg_mgr_update
                            ;;
                    esac

                    # Soft version matching: resolve DOCKER_VERSION to a concrete package
                    # version suffix ("=ver" on debian, "-ver" on rhel), or leave empty for latest
                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
                        # Empty, meaning grab whatever "latest" is in apt repo
                        engine_version_suffix=""
                        cli_version_suffix=""
                    else
                        case ${ADJUSTED_ID} in
                            debian)
                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                        set +e # Don't exit if finding version fails - will handle gracefully
                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                        set -e
                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                            exit 1
                        fi
                        ;;
                    rhel)
                         # For RHEL-based systems, use dnf/yum to find versions
                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
                                set +e # Don't exit if finding version fails - will handle gracefully
                                    if [ "${USE_MOBY}" = "true" ]; then
                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    else
                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    fi
                                set -e
                                if [ -n "${available_versions}" ]; then
                                    engine_version_suffix="-${available_versions}"
                                    cli_version_suffix="-${available_versions}"
                                else
                                    # Unlike debian, a miss here falls back to latest instead of exiting
                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                                    engine_version_suffix=""
                                    cli_version_suffix=""
                                fi
                                ;;
                        esac
                    fi

                    # Version matching for moby-buildx (same approach as above)
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
                            # Empty, meaning grab whatever "latest" is in apt repo
                            buildx_version_suffix=""
                        else
                            case ${ADJUSTED_ID} in
                                debian)
                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                            set +e
                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                            set -e
                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                                exit 1
                            fi
                            ;;
                                rhel)
                                    # For RHEL-based systems, try to find buildx version or use latest
                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                                    set +e
                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                                    set -e
                                    if [ -n "${available_buildx}" ]; then
                                        buildx_version_suffix="-${available_buildx}"
                                    else
                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                                        buildx_version_suffix=""
                                    fi
                                    ;;
                            esac
                            echo "buildx_version_suffix ${buildx_version_suffix}"
                        fi
                    fi
5558
5559                    # Install Docker / Moby CLI if not already installed
5560                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5561                        echo "Docker / Moby CLI and Engine already installed."
5562                    else
5563                            case ${ADJUSTED_ID} in
5564                            debian)
5565                                if [ "${USE_MOBY}" = "true" ]; then
5566                                    # Install engine
5567                                    set +e # Handle error gracefully
5568                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5569                                        exit_code=$?
5570                                    set -e
5571
5572                                    if [ ${exit_code} -ne 0 ]; then
5573                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5574                                        exit 1
5575                                    fi
5576
5577                                    # Install compose
5578                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5579                                else
5580                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5581                                    # Install compose
5582                                    apt-mark hold docker-ce docker-ce-cli
5583                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5584                                fi
5585                                ;;
5586                            rhel)
5587                                if [ "${USE_MOBY}" = "true" ]; then
5588                                    set +e # Handle error gracefully
5589                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5590                                        exit_code=$?
5591                                    set -e
5592
5593                                    if [ ${exit_code} -ne 0 ]; then
5594                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5595                                        exit 1
5596                                    fi
5597
5598                                    # Install compose
5599                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5600                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5601                                    fi
5602                                else
5603                                                   # Special handling for Azure Linux Docker CE installation
5604                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5605                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5606
5607                                        # Use rpm with --force and --nodeps for Azure Linux
5608                                        set +e  # Don't exit on error for this section
5609                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5610                                        install_result=$?
5611                                        set -e
5612
5613                                        if [ $install_result -ne 0 ]; then
5614                                            echo "(*) Standard installation failed, trying manual installation..."
5615
5616                                            echo "(*) Standard installation failed, trying manual installation..."
5617
5618                                            # Create directory for downloading packages
5619                                            mkdir -p /tmp/docker-ce-install
5620
5621                                            # Download packages manually using curl since tdnf doesn't support download
5622                                            echo "(*) Downloading Docker CE packages manually..."
5623
5624                                            # Get the repository baseurl
5625                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5626
5627                                            # Download packages directly
5628                                            cd /tmp/docker-ce-install
5629
5630                                            # Get package names with versions
5631                                            if [ -n "${cli_version_suffix}" ]; then
5632                                                docker_ce_version="${cli_version_suffix#-}"
5633                                                docker_cli_version="${engine_version_suffix#-}"
5634                                            else
5635                                                # Get latest version from repository
5636                                                docker_ce_version="latest"
5637                                            fi
5638
5639                                            echo "(*) Attempting to download Docker CE packages from repository..."
5640
5641                                            # Try to download latest packages if specific version fails
5642                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5643                                                # Fallback: try to get latest available version
5644                                                echo "(*) Specific version not found, trying latest..."
5645                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5646                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5647                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5648
5649                                                if [ -n "${latest_docker}" ]; then
5650                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5651                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5652                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5653                                                else
5654                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
5655                                                    echo "(*) Please check repository configuration or use 'moby': true"
5656                                                    exit 1
5657                                                fi
5658                                            fi
5659                                            # Install systemd libraries required by Docker CE
5660                                            echo "(*) Installing systemd libraries required by Docker CE..."
5661                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5662                                                echo "(*) WARNING: Could not install systemd libraries"
5663                                                echo "(*) Docker may fail to start without these"
5664                                            }
5665
5666                                            # Install with rpm --force --nodeps
5667                                            echo "(*) Installing Docker CE packages with dependency override..."
5668                                            rpm -Uvh --force --nodeps *.rpm
5669
5670                                            # Cleanup
5671                                            cd /
5672                                            rm -rf /tmp/docker-ce-install
5673
5674                                            echo "(*) Docker CE installation completed with dependency bypass"
5675                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5676                                        fi
5677                                    else
5678                                        # Standard installation for other RHEL-based systems
5679                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5680                                    fi
5681                                    # Install compose
5682                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5683                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5684                                    fi
5685                                fi
5686                                ;;
5687                        esac
5688                    fi
5689
5690                    echo "Finished installing docker / moby!"
5691
5692                    docker_home="/usr/libexec/docker"
5693                    cli_plugins_dir="${docker_home}/cli-plugins"
5694
5695                    # fallback for docker-compose
5696                    fallback_compose(){
5697                        local url=$1
5698                        local repo_url=$(get_github_api_repo_url "$url")
5699                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5700                        get_previous_version "${url}" "${repo_url}" compose_version
5701                        echo -e "\nAttempting to install v${compose_version}"
5702                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5703                    }
5704
5705                    # If 'docker-compose' command is to be included
5706                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5707                        case "${architecture}" in
5708                        amd64|x86_64) target_compose_arch=x86_64 ;;
5709                        arm64|aarch64) target_compose_arch=aarch64 ;;
5710                        *)
5711                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5712                            exit 1
5713                        esac
5714
5715                        docker_compose_path="/usr/local/bin/docker-compose"
5716                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5717                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5718                            INSTALL_DOCKER_COMPOSE_SWITCH="false"
5719
5720                            if [ "${target_compose_arch}" = "x86_64" ]; then
5721                                echo "(*) Installing docker compose v1..."
5722                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5723                                chmod +x ${docker_compose_path}
5724
5725                                # Download the SHA256 checksum
5726                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5727                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5728                                sha256sum -c docker-compose.sha256sum --ignore-missing
5729                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5730                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5731                                exit 1
5732                            else
5733                                # Use pip to get a version that runs on this architecture
5734                                check_packages python3-minimal python3-pip libffi-dev python3-venv
5735                                echo "(*) Installing docker compose v1 via pip..."
5736                                export PYTHONUSERBASE=/usr/local
5737                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5738                            fi
5739                        else
5740                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5741                            docker_compose_url="https://github.com/docker/compose"
5742                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5743                            echo "(*) Installing docker-compose ${compose_version}..."
5744                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5745                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5746                                     fallback_compose "$docker_compose_url"
5747                            }
5748
5749                            chmod +x ${docker_compose_path}
5750
5751                            # Download the SHA256 checksum
5752                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5753                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5754                            sha256sum -c docker-compose.sha256sum --ignore-missing
5755
5756                            mkdir -p ${cli_plugins_dir}
5757                            cp ${docker_compose_path} ${cli_plugins_dir}
5758                        fi
5759                    fi
5760
5761                    # fallback method for compose-switch
5762                    fallback_compose-switch() {
5763                        local url=$1
5764                        local repo_url=$(get_github_api_repo_url "$url")
5765                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5766                        get_previous_version "$url" "$repo_url" compose_switch_version
5767                        echo -e "\nAttempting to install v${compose_switch_version}"
5768                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5769                    }
5770                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5771                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5772                        if type docker-compose > /dev/null 2>&1; then
5773                            echo "(*) Installing compose-switch..."
5774                            current_compose_path="$(command -v docker-compose)"
5775                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5776                            compose_switch_version="latest"
5777                            compose_switch_url="https://github.com/docker/compose-switch"
5778                            # Try to get latest version, fallback to known stable version if GitHub API fails
5779                            set +e
5780                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
5781                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5782                                echo "(*) GitHub API rate limited or failed, using fallback method"
5783                                fallback_compose-switch "$compose_switch_url"
5784                            fi
5785                            set -e
5786
5787                            # Map architecture for compose-switch downloads
5788                            case "${architecture}" in
5789                                amd64|x86_64) target_switch_arch=amd64 ;;
5790                                arm64|aarch64) target_switch_arch=arm64 ;;
5791                                *) target_switch_arch=${architecture} ;;
5792                            esac
5793                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5794                            chmod +x /usr/local/bin/compose-switch
5795                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5796                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5797                            mv "${current_compose_path}" "${target_compose_path}"
5798                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5799                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5800                        else
5801                            err "Skipping installation of compose-switch as docker compose is unavailable..."
5802                        fi
5803                    fi
5804
5805                    # If init file already exists, exit
5806                    if [ -f "/usr/local/share/docker-init.sh" ]; then
5807                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
5808                        # Clean up
5809                        rm -rf /var/lib/apt/lists/*
5810                        exit 0
5811                    fi
5812                    echo "docker-init doesn't exist, adding..."
5813
5814                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5815                            groupadd -r docker
5816                    fi
5817
5818                    usermod -aG docker ${USERNAME}
5819
5820                    # fallback for docker/buildx
5821                    fallback_buildx() {
5822                        local url=$1
5823                        local repo_url=$(get_github_api_repo_url "$url")
5824                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5825                        get_previous_version "$url" "$repo_url" buildx_version
5826                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5827                        echo -e "\nAttempting to install v${buildx_version}"
5828                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5829                    }
5830
5831                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5832                        buildx_version="latest"
5833                        docker_buildx_url="https://github.com/docker/buildx"
5834                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5835                        echo "(*) Installing buildx ${buildx_version}..."
5836
5837                          # Map architecture for buildx downloads
5838                        case "${architecture}" in
5839                            amd64|x86_64) target_buildx_arch=amd64 ;;
5840                            arm64|aarch64) target_buildx_arch=arm64 ;;
5841                            *) target_buildx_arch=${architecture} ;;
5842                        esac
5843
5844                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5845
5846                        cd /tmp
5847                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5848
5849                        docker_home="/usr/libexec/docker"
5850                        cli_plugins_dir="${docker_home}/cli-plugins"
5851
5852                        mkdir -p ${cli_plugins_dir}
5853                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5854                        chmod +x ${cli_plugins_dir}/docker-buildx
5855
5856                        chown -R "${USERNAME}:docker" "${docker_home}"
5857                        chmod -R g+r+w "${docker_home}"
5858                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5859                    fi
5860
5861                    DOCKER_DEFAULT_IP6_TABLES=""
5862                    if [ "$DISABLE_IP6_TABLES" == true ]; then
5863                        requested_version=""
5864                        # checking whether the version requested either is in semver format or just a number denoting the major version
5865                        # and, extracting the major version number out of the two scenarios
5866                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5867                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5868                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5869                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5870                            requested_version=$DOCKER_VERSION
5871                        fi
5872                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5873                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5874                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5875                        fi
5876                    fi
5877
5878                    if [ ! -d /usr/local/share ]; then
5879                        mkdir -p /usr/local/share
5880                    fi
5881
5882                    tee /usr/local/share/docker-init.sh > /dev/null \
5883                    << EOF
5884                    #!/bin/sh
5885                    #-------------------------------------------------------------------------------------------------------------
5886                    # Copyright (c) Microsoft Corporation. All rights reserved.
5887                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5888                    #-------------------------------------------------------------------------------------------------------------
5889
5890                    set -e
5891
5892                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5893                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5894                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5895                    EOF
5896
5897                    tee -a /usr/local/share/docker-init.sh > /dev/null \
5898                    << 'EOF'
5899                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5900                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5901                        find /run /var/run -iname 'docker*.pid' -delete || :
5902                        find /run /var/run -iname 'container*.pid' -delete || :
5903
5904                        # -- Start: dind wrapper script --
5905                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5906
5907                        export container=docker
5908
5909                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5910                            mount -t securityfs none /sys/kernel/security || {
5911                                echo >&2 'Could not mount /sys/kernel/security.'
5912                                echo >&2 'AppArmor detection and --privileged mode might break.'
5913                            }
5914                        fi
5915
5916                        # Mount /tmp (conditionally)
5917                        if ! mountpoint -q /tmp; then
5918                            mount -t tmpfs none /tmp
5919                        fi
5920
5921                        set_cgroup_nesting()
5922                        {
5923                            # cgroup v2: enable nesting
5924                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5925                                # move the processes from the root group to the /init group,
5926                                # otherwise writing subtree_control fails with EBUSY.
5927                                # An error during moving non-existent process (i.e., "cat") is ignored.
5928                                mkdir -p /sys/fs/cgroup/init
5929                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5930                                # enable controllers
5931                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5932                                    > /sys/fs/cgroup/cgroup.subtree_control
5933                            fi
5934                        }
5935
5936                        # Set cgroup nesting, retrying if necessary
5937                        retry_cgroup_nesting=0
5938
5939                        until [ "${retry_cgroup_nesting}" -eq "5" ];
5940                        do
5941                            set +e
5942                                set_cgroup_nesting
5943
5944                                if [ $? -ne 0 ]; then
5945                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5946                                else
5947                                    break
5948                                fi
5949
5950                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5951                            set -e
5952                        done
5953
5954                        # -- End: dind wrapper script --
5955
5956                        # Handle DNS
5957                        set +e
5958                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5959                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5960                            then
5961                                echo "Setting dockerd Azure DNS."
5962                                CUSTOMDNS="--dns 168.63.129.16"
5963                            else
5964                                echo "Not setting dockerd DNS manually."
5965                                CUSTOMDNS=""
5966                            fi
5967                        set -e
5968
5969                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5970                        then
5971                            DEFAULT_ADDRESS_POOL=""
5972                        else
5973                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5974                        fi
5975
5976                        # Start docker/moby engine
5977                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5978                    INNEREOF
5979                    )"
5980
5981                    sudo_if() {
5982                        COMMAND="$*"
5983
5984                        if [ "$(id -u)" -ne 0 ]; then
5985                            sudo $COMMAND
5986                        else
5987                            $COMMAND
5988                        fi
5989                    }
5990
5991                    retry_docker_start_count=0
5992                    docker_ok="false"
5993
5994                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
5995                    do
5996                        # Start using sudo if not invoked as root
5997                        if [ "$(id -u)" -ne 0 ]; then
5998                            sudo /bin/sh -c "${dockerd_start}"
5999                        else
6000                            eval "${dockerd_start}"
6001                        fi
6002
6003                        retry_count=0
6004                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
6005                        do
6006                            sleep 1s
6007                            set +e
6008                                docker info > /dev/null 2>&1 && docker_ok="true"
6009                            set -e
6010
6011                            retry_count=`expr $retry_count + 1`
6012                        done
6013
6014                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6015                            echo "(*) Failed to start docker, retrying..."
6016                            set +e
6017                                sudo_if pkill dockerd
6018                                sudo_if pkill containerd
6019                            set -e
6020                        fi
6021
6022                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
6023                    done
6024
6025                    # Execute whatever commands were passed in (if any). This allows us
6026                    # to set this script to ENTRYPOINT while still executing the default CMD.
6027                    exec "$@"
6028                    EOF
6029
6030                    chmod +x /usr/local/share/docker-init.sh
6031                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
6032
6033                    # Clean up
6034                    rm -rf /var/lib/apt/lists/*
6035
6036                    echo 'docker-in-docker-debian script has completed!'"#),
6037                ]).await;
6038
6039                return Ok(http::Response::builder()
6040                    .status(200)
6041                    .body(AsyncBody::from(response))
6042                    .unwrap());
6043            }
            // Mock OCI registry manifest for the `go` feature at tag "1".
            // Shaped like a real ghcr.io response: a tiny config blob, one
            // tar layer (its digest is the path the blob handler below
            // answers), and a `dev.containers.metadata` annotation carrying
            // the feature's devcontainer-feature.json as escaped JSON.
            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
                                "size": 20992,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }
                    "#;

                return Ok(http::Response::builder()
                    .status(200)
                    .body(http_client::AsyncBody::from(response))
                    .unwrap());
            }
            // Mock blob download for the `go` feature. The requested digest
            // matches the single layer advertised by the `go` manifest mock,
            // and the body is a tarball built in-memory by `build_tarball`
            // containing the feature's metadata file and its install script.
            // NOTE(review): the script text appears to mirror the upstream
            // devcontainers/features go install.sh — it is fixture data and
            // must be kept byte-for-byte stable.
            if parts.uri.path()
                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
            {
                let response = build_tarball(vec![
                    // Entry 1: the feature's devcontainer-feature.json metadata.
                    ("./devcontainer-feature.json", r#"
                        {
                            "id": "go",
                            "version": "1.3.3",
                            "name": "Go",
                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
                            "options": {
                                "version": {
                                    "type": "string",
                                    "proposals": [
                                        "latest",
                                        "none",
                                        "1.24",
                                        "1.23"
                                    ],
                                    "default": "latest",
                                    "description": "Select or enter a Go version to install"
                                },
                                "golangciLintVersion": {
                                    "type": "string",
                                    "default": "latest",
                                    "description": "Version of golangci-lint to install"
                                }
                            },
                            "init": true,
                            "customizations": {
                                "vscode": {
                                    "extensions": [
                                        "golang.Go"
                                    ],
                                    "settings": {
                                        "github.copilot.chat.codeGeneration.instructions": [
                                            {
                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                                            }
                                        ]
                                    }
                                }
                            },
                            "containerEnv": {
                                "GOROOT": "/usr/local/go",
                                "GOPATH": "/go",
                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
                            },
                            "capAdd": [
                                "SYS_PTRACE"
                            ],
                            "securityOpt": [
                                "seccomp=unconfined"
                            ],
                            "installsAfter": [
                                "ghcr.io/devcontainers/features/common-utils"
                            ]
                        }
                        "#),
                    // Entry 2: the feature's install.sh script.
                    ("./install.sh", r#"
                    #!/usr/bin/env bash
                    #-------------------------------------------------------------------------------------------------------------
                    # Copyright (c) Microsoft Corporation. All rights reserved.
                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
                    #-------------------------------------------------------------------------------------------------------------
                    #
                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
                    # Maintainer: The VS Code and Codespaces Teams

                    TARGET_GO_VERSION="${VERSION:-"latest"}"
                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

                    # https://www.google.com/linuxrepositories/
                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

                    set -e

                    if [ "$(id -u)" -ne 0 ]; then
                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
                        exit 1
                    fi

                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
                    . /etc/os-release
                    # Get an adjusted ID independent of distro variants
                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
                        ADJUSTED_ID="debian"
                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
                        ADJUSTED_ID="rhel"
                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
                        else
                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
                        fi
                    else
                        echo "Linux distro ${ID} not supported."
                        exit 1
                    fi

                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
                        # Update the repo files to reference vault.centos.org.
                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
                    fi

                    # Setup INSTALL_CMD & PKG_MGR_CMD
                    if type apt-get > /dev/null 2>&1; then
                        PKG_MGR_CMD=apt-get
                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
                    elif type microdnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=microdnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    elif type dnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=dnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    else
                        PKG_MGR_CMD=yum
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
                    fi

                    # Clean up
                    clean_up() {
                        case ${ADJUSTED_ID} in
                            debian)
                                rm -rf /var/lib/apt/lists/*
                                ;;
                            rhel)
                                rm -rf /var/cache/dnf/* /var/cache/yum/*
                                rm -rf /tmp/yum.log
                                rm -rf ${GPG_INSTALL_PATH}
                                ;;
                        esac
                    }
                    clean_up


                    # Figure out correct version of a three part version number is not passed
                    find_version_from_git_tags() {
                        local variable_name=$1
                        local requested_version=${!variable_name}
                        if [ "${requested_version}" = "none" ]; then return; fi
                        local repository=$2
                        local prefix=${3:-"tags/v"}
                        local separator=${4:-"."}
                        local last_part_optional=${5:-"false"}
                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                            local escaped_separator=${separator//./\\.}
                            local last_part
                            if [ "${last_part_optional}" = "true" ]; then
                                last_part="(${escaped_separator}[0-9]+)?"
                            else
                                last_part="${escaped_separator}[0-9]+"
                            fi
                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                            else
                                set +e
                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                                set -e
                            fi
                        fi
                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                            exit 1
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }

                    pkg_mgr_update() {
                        case $ADJUSTED_ID in
                            debian)
                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                                    echo "Running apt-get update..."
                                    ${PKG_MGR_CMD} update -y
                                fi
                                ;;
                            rhel)
                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} makecache ..."
                                        ${PKG_MGR_CMD} makecache
                                    fi
                                else
                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} check-update ..."
                                        set +e
                                        ${PKG_MGR_CMD} check-update
                                        rc=$?
                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
                                            exit 1
                                        fi
                                        set -e
                                    fi
                                fi
                                ;;
                        esac
                    }

                    # Checks if packages are installed and installs them if not
                    check_packages() {
                        case ${ADJUSTED_ID} in
                            debian)
                                if ! dpkg -s "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                            rhel)
                                if ! rpm -q "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                        esac
                    }

                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
                    rm -f /etc/profile.d/00-restore-env.sh
                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
                    chmod +x /etc/profile.d/00-restore-env.sh

                    # Some distributions do not install awk by default (e.g. Mariner)
                    if ! type awk >/dev/null 2>&1; then
                        check_packages awk
                    fi

                    # Determine the appropriate non-root user
                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
                        USERNAME=""
                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
                                USERNAME=${CURRENT_USER}
                                break
                            fi
                        done
                        if [ "${USERNAME}" = "" ]; then
                            USERNAME=root
                        fi
                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
                        USERNAME=root
                    fi

                    export DEBIAN_FRONTEND=noninteractive

                    check_packages ca-certificates gnupg2 tar gcc make pkg-config

                    if [ $ADJUSTED_ID = "debian" ]; then
                        check_packages g++ libc6-dev
                    else
                        check_packages gcc-c++ glibc-devel
                    fi
                    # Install curl, git, other dependencies if missing
                    if ! type curl > /dev/null 2>&1; then
                        check_packages curl
                    fi
                    if ! type git > /dev/null 2>&1; then
                        check_packages git
                    fi
                    # Some systems, e.g. Mariner, still a few more packages
                    if ! type as > /dev/null 2>&1; then
                        check_packages binutils
                    fi
                    if ! [ -f /usr/include/linux/errno.h ]; then
                        check_packages kernel-headers
                    fi
                    # Minimal RHEL install may need findutils installed
                    if ! [ -f /usr/bin/find ]; then
                        check_packages findutils
                    fi

                    # Get closest match for version number specified
                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

                    architecture="$(uname -m)"
                    case $architecture in
                        x86_64) architecture="amd64";;
                        aarch64 | armv8*) architecture="arm64";;
                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
                        i?86) architecture="386";;
                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
                    esac

                    # Install Go
                    umask 0002
                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
                        groupadd -r golang
                    fi
                    usermod -a -G golang "${USERNAME}"
                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
                        # Use a temporary location for gpg keys to avoid polluting image
                        export GNUPGHOME="/tmp/tmp-gnupg"
                        mkdir -p ${GNUPGHOME}
                        chmod 700 ${GNUPGHOME}
                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
                        gpg -q --import /tmp/tmp-gnupg/golang_key
                        echo "Downloading Go ${TARGET_GO_VERSION}..."
                        set +e
                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        exit_code=$?
                        set -e
                        if [ "$exit_code" != "0" ]; then
                            echo "(!) Download failed."
                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                            set +e
                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
                            # Handle Go's odd version pattern where "0" releases omit the last part
                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                TARGET_GO_VERSION="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
                            else
                                ((breakfix=breakfix-1))
                                if [ "${breakfix}" = "0" ]; then
                                    TARGET_GO_VERSION="${major}.${minor}"
                                else
                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
                                fi
                            fi
                            set -e
                            echo "Trying ${TARGET_GO_VERSION}..."
                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        fi
                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
                        echo "Extracting Go ${TARGET_GO_VERSION}..."
                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
                    else
                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
                    fi

                    # Install Go tools that are isImportant && !replacedByGopls based on
                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
                    GO_TOOLS="\
                        golang.org/x/tools/gopls@latest \
                        honnef.co/go/tools/cmd/staticcheck@latest \
                        golang.org/x/lint/golint@latest \
                        github.com/mgechev/revive@latest \
                        github.com/go-delve/delve/cmd/dlv@latest \
                        github.com/fatih/gomodifytags@latest \
                        github.com/haya14busa/goplay/cmd/goplay@latest \
                        github.com/cweill/gotests/gotests@latest \
                        github.com/josharian/impl@latest"

                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
                        echo "Installing common Go tools..."
                        export PATH=${TARGET_GOROOT}/bin:${PATH}
                        export GOPATH=/tmp/gotools
                        export GOCACHE="${GOPATH}/cache"

                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
                        cd "${GOPATH}"

                        # Use go get for versions of go under 1.16
                        go_install_command=install
                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
                            export GO111MODULE=on
                            go_install_command=get
                            echo "Go version < 1.16, using go get."
                        fi

                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

                        # Move Go tools into path
                        if [ -d "${GOPATH}/bin" ]; then
                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
                        fi

                        # Install golangci-lint from precompiled binaries
                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
                            echo "Installing golangci-lint latest..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin"
                        else
                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
                        fi

                        # Remove Go tools temp directory
                        rm -rf "${GOPATH}"
                    fi


                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

                    # Clean up
                    clean_up

                    echo "Done!"
                        "#),
                ])
                .await;
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // Mock OCI registry manifest for the `aws-cli` feature at tag
            // "1". Same shape as the `go` manifest mock: one tar layer whose
            // digest the corresponding blob handler serves, plus the
            // `dev.containers.metadata` annotation embedding the feature's
            // devcontainer-feature.json as escaped JSON.
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                                "size": 19968,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }"#;
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
6524            if parts.uri.path()
6525                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6526            {
6527                let response = build_tarball(vec![
6528                    (
6529                        "./devcontainer-feature.json",
6530                        r#"
6531{
6532    "id": "aws-cli",
6533    "version": "1.1.3",
6534    "name": "AWS CLI",
6535    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6536    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6537    "options": {
6538        "version": {
6539            "type": "string",
6540            "proposals": [
6541                "latest"
6542            ],
6543            "default": "latest",
6544            "description": "Select or enter an AWS CLI version."
6545        },
6546        "verbose": {
6547            "type": "boolean",
6548            "default": true,
6549            "description": "Suppress verbose output."
6550        }
6551    },
6552    "customizations": {
6553        "vscode": {
6554            "extensions": [
6555                "AmazonWebServices.aws-toolkit-vscode"
6556            ],
6557            "settings": {
6558                "github.copilot.chat.codeGeneration.instructions": [
6559                    {
6560                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6561                    }
6562                ]
6563            }
6564        }
6565    },
6566    "installsAfter": [
6567        "ghcr.io/devcontainers/features/common-utils"
6568    ]
6569}
6570                    "#,
6571                    ),
6572                    (
6573                        "./install.sh",
6574                        r#"#!/usr/bin/env bash
6575                    #-------------------------------------------------------------------------------------------------------------
6576                    # Copyright (c) Microsoft Corporation. All rights reserved.
6577                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6578                    #-------------------------------------------------------------------------------------------------------------
6579                    #
6580                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6581                    # Maintainer: The VS Code and Codespaces Teams
6582
6583                    set -e
6584
6585                    # Clean up
6586                    rm -rf /var/lib/apt/lists/*
6587
6588                    VERSION=${VERSION:-"latest"}
6589                    VERBOSE=${VERBOSE:-"true"}
6590
6591                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6592                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6593
6594                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6595                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6596                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6597                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6598                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6599                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6600                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6601                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6602                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6603                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6604                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6605                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6606                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6607                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6608                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6609                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6610                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6611                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6612                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6613                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6614                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6615                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6616                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6617                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6618                    YLZATHZKTJyiqA==
6619                    =vYOk
6620                    -----END PGP PUBLIC KEY BLOCK-----"
6621
6622                    if [ "$(id -u)" -ne 0 ]; then
6623                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6624                        exit 1
6625                    fi
6626
6627                    apt_get_update()
6628                    {
6629                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6630                            echo "Running apt-get update..."
6631                            apt-get update -y
6632                        fi
6633                    }
6634
6635                    # Checks if packages are installed and installs them if not
6636                    check_packages() {
6637                        if ! dpkg -s "$@" > /dev/null 2>&1; then
6638                            apt_get_update
6639                            apt-get -y install --no-install-recommends "$@"
6640                        fi
6641                    }
6642
6643                    export DEBIAN_FRONTEND=noninteractive
6644
6645                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6646
6647                    verify_aws_cli_gpg_signature() {
6648                        local filePath=$1
6649                        local sigFilePath=$2
6650                        local awsGpgKeyring=aws-cli-public-key.gpg
6651
6652                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6653                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6654                        local status=$?
6655
6656                        rm "./${awsGpgKeyring}"
6657
6658                        return ${status}
6659                    }
6660
6661                    install() {
6662                        local scriptZipFile=awscli.zip
6663                        local scriptSigFile=awscli.sig
6664
6665                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6666                        if [ "${VERSION}" != "latest" ]; then
6667                            local versionStr=-${VERSION}
6668                        fi
6669                        architecture=$(dpkg --print-architecture)
6670                        case "${architecture}" in
6671                            amd64) architectureStr=x86_64 ;;
6672                            arm64) architectureStr=aarch64 ;;
6673                            *)
6674                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6675                                exit 1
6676                        esac
6677                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6678                        curl "${scriptUrl}" -o "${scriptZipFile}"
6679                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6680
6681                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6682                        if (( $? > 0 )); then
6683                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6684                            exit 1
6685                        fi
6686
6687                        if [ "${VERBOSE}" = "false" ]; then
6688                            unzip -q "${scriptZipFile}"
6689                        else
6690                            unzip "${scriptZipFile}"
6691                        fi
6692
6693                        ./aws/install
6694
6695                        # kubectl bash completion
6696                        mkdir -p /etc/bash_completion.d
6697                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6698
6699                        # kubectl zsh completion
6700                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6701                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6702                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6703                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6704                        fi
6705
6706                        rm -rf ./aws
6707                    }
6708
6709                    echo "(*) Installing AWS CLI..."
6710
6711                    install
6712
6713                    # Clean up
6714                    rm -rf /var/lib/apt/lists/*
6715
6716                    echo "Done!""#,
6717                    ),
6718                    ("./scripts/", r#""#),
6719                    (
6720                        "./scripts/fetch-latest-completer-scripts.sh",
6721                        r#"
6722                        #!/bin/bash
6723                        #-------------------------------------------------------------------------------------------------------------
6724                        # Copyright (c) Microsoft Corporation. All rights reserved.
6725                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6726                        #-------------------------------------------------------------------------------------------------------------
6727                        #
6728                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6729                        # Maintainer: The Dev Container spec maintainers
6730                        #
6731                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6732                        #
6733                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6734                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6735                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6736
6737                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6738                        chmod +x "$BASH_COMPLETER_SCRIPT"
6739
6740                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6741                        chmod +x "$ZSH_COMPLETER_SCRIPT"
6742                        "#,
6743                    ),
6744                    ("./scripts/vendor/", r#""#),
6745                    (
6746                        "./scripts/vendor/aws_bash_completer",
6747                        r#"
6748                        # Typically that would be added under one of the following paths:
6749                        # - /etc/bash_completion.d
6750                        # - /usr/local/etc/bash_completion.d
6751                        # - /usr/share/bash-completion/completions
6752
6753                        complete -C aws_completer aws
6754                        "#,
6755                    ),
6756                    (
6757                        "./scripts/vendor/aws_zsh_completer.sh",
6758                        r#"
6759                        # Source this file to activate auto completion for zsh using the bash
6760                        # compatibility helper.  Make sure to run `compinit` before, which should be
6761                        # given usually.
6762                        #
6763                        # % source /path/to/zsh_complete.sh
6764                        #
6765                        # Typically that would be called somewhere in your .zshrc.
6766                        #
6767                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6768                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6769                        #
6770                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6771                        #
6772                        # zsh releases prior to that version do not export the required env variables!
6773
6774                        autoload -Uz bashcompinit
6775                        bashcompinit -i
6776
6777                        _bash_complete() {
6778                          local ret=1
6779                          local -a suf matches
6780                          local -x COMP_POINT COMP_CWORD
6781                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6782                          local -x COMP_LINE="$words"
6783                          local -A savejobstates savejobtexts
6784
6785                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6786                          (( COMP_CWORD = CURRENT - 1))
6787                          COMP_WORDS=( $words )
6788                          BASH_VERSINFO=( 2 05b 0 1 release )
6789
6790                          savejobstates=( ${(kv)jobstates} )
6791                          savejobtexts=( ${(kv)jobtexts} )
6792
6793                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6794
6795                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6796
6797                          if [[ -n $matches ]]; then
6798                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6799                              compset -P '*/' && matches=( ${matches##*/} )
6800                              compset -S '/*' && matches=( ${matches%%/*} )
6801                              compadd -Q -f "${suf[@]}" -a matches && ret=0
6802                            else
6803                              compadd -Q "${suf[@]}" -a matches && ret=0
6804                            fi
6805                          fi
6806
6807                          if (( ret )); then
6808                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6809                              _default "${suf[@]}" && ret=0
6810                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6811                              _directories "${suf[@]}" && ret=0
6812                            fi
6813                          fi
6814
6815                          return ret
6816                        }
6817
6818                        complete -C aws_completer aws
6819                        "#,
6820                    ),
6821                ]).await;
6822
6823                return Ok(http::Response::builder()
6824                    .status(200)
6825                    .body(AsyncBody::from(response))
6826                    .unwrap());
6827            }
6828
6829            Ok(http::Response::builder()
6830                .status(404)
6831                .body(http_client::AsyncBody::default())
6832                .unwrap())
6833        })
6834    }
6835}