devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use fs::Fs;
  10use http_client::HttpClient;
  11use util::{ResultExt, command::Command};
  12
  13use crate::{
  14    DevContainerConfig, DevContainerContext,
  15    command_json::{CommandRunner, DefaultCommandRunner},
  16    devcontainer_api::{DevContainerError, DevContainerUp},
  17    devcontainer_json::{
  18        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
  19        deserialize_devcontainer_json,
  20    },
  21    docker::{
  22        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  23        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  24        get_remote_dir_from_config,
  25    },
  26    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  27    get_oci_token,
  28    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  29    safe_id_lower,
  30};
  31
/// Parse state of the devcontainer configuration. The config starts as
/// `Deserialized` and is promoted to `VariableParsed` once non-remote
/// variable substitution has been applied (see `parse_nonremote_vars`).
enum ConfigStatus {
    /// Raw JSON has been deserialized, but variable substitutions
    /// (e.g. `${localWorkspaceFolder}`) have not yet been expanded.
    Deserialized(DevContainer),
    /// Non-remote variables have been expanded and the result re-parsed.
    VariableParsed(DevContainer),
}
  36
/// The docker-compose files in effect for a dev container plus the merged
/// configuration the docker client derived from them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Full paths of every compose file contributing to this configuration.
    files: Vec<PathBuf>,
    // Merged compose configuration as returned by the docker client.
    config: DockerComposeConfig,
}
  42
/// Drives a dev container from configuration to runnable resources:
/// parsing the devcontainer.json, downloading OCI features, generating the
/// extended Dockerfile, and assembling build/run resources.
struct DevContainerManifest {
    // HTTP client for OCI registry traffic (tokens, manifests, tarballs).
    http_client: Arc<dyn HttpClient>,
    // Filesystem abstraction used for reading config and writing build artifacts.
    fs: Arc<dyn Fs>,
    // Docker operations: image inspect, compose config, builds.
    docker_client: Arc<dyn DockerClient>,
    // External command execution; not exercised in this chunk — see other methods.
    command_runner: Arc<dyn CommandRunner>,
    // Unmodified devcontainer.json text, kept so variables can be re-expanded.
    raw_config: String,
    // Current parse state of `raw_config` (deserialized vs. variable-expanded).
    config: ConfigStatus,
    // Snapshot of the local environment, used for `${localEnv:*}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the local machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the config within `config_directory`.
    file_name: String,
    // Inspect result for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Scratch paths and image tag for the features build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Manifests of the OCI features downloaded for this container.
    features: Vec<FeatureManifest>,
}
// Default parent directory for the project inside the container
// (presumably joined with the workspace name; usage is outside this chunk).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
  59impl DevContainerManifest {
  60    async fn new(
  61        context: &DevContainerContext,
  62        environment: HashMap<String, String>,
  63        docker_client: Arc<dyn DockerClient>,
  64        command_runner: Arc<dyn CommandRunner>,
  65        local_config: DevContainerConfig,
  66        local_project_path: &Path,
  67    ) -> Result<Self, DevContainerError> {
  68        let config_path = local_project_path.join(local_config.config_path.clone());
  69        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  70        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  71            log::error!("Unable to read devcontainer contents: {e}");
  72            DevContainerError::DevContainerParseFailed
  73        })?;
  74
  75        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  76
  77        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  78            log::error!("Dev container file should be in a directory");
  79            DevContainerError::NotInValidProject
  80        })?;
  81        let file_name = config_path
  82            .file_name()
  83            .and_then(|f| f.to_str())
  84            .ok_or_else(|| {
  85                log::error!("Dev container file has no file name, or is invalid unicode");
  86                DevContainerError::DevContainerParseFailed
  87            })?;
  88
  89        Ok(Self {
  90            fs: context.fs.clone(),
  91            http_client: context.http_client.clone(),
  92            docker_client,
  93            command_runner,
  94            raw_config: devcontainer_contents,
  95            config: ConfigStatus::Deserialized(devcontainer),
  96            local_project_directory: local_project_path.to_path_buf(),
  97            local_environment: environment,
  98            config_directory: devcontainer_directory.to_path_buf(),
  99            file_name: file_name.to_string(),
 100            root_image: None,
 101            features_build_info: None,
 102            features: Vec::new(),
 103        })
 104    }
 105
 106    fn devcontainer_id(&self) -> String {
 107        let mut labels = self.identifying_labels();
 108        labels.sort_by_key(|(key, _)| *key);
 109
 110        let mut hasher = DefaultHasher::new();
 111        for (key, value) in &labels {
 112            key.hash(&mut hasher);
 113            value.hash(&mut hasher);
 114        }
 115
 116        format!("{:016x}", hasher.finish())
 117    }
 118
 119    fn identifying_labels(&self) -> Vec<(&str, String)> {
 120        let labels = vec![
 121            (
 122                "devcontainer.local_folder",
 123                (self.local_project_directory.display()).to_string(),
 124            ),
 125            (
 126                "devcontainer.config_file",
 127                (self.config_file().display()).to_string(),
 128            ),
 129        ];
 130        labels
 131    }
 132
 133    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
 134        let mut replaced_content = content
 135            .replace("${devcontainerId}", &self.devcontainer_id())
 136            .replace(
 137                "${containerWorkspaceFolderBasename}",
 138                &self.remote_workspace_base_name().unwrap_or_default(),
 139            )
 140            .replace(
 141                "${localWorkspaceFolderBasename}",
 142                &self.local_workspace_base_name()?,
 143            )
 144            .replace(
 145                "${containerWorkspaceFolder}",
 146                &self
 147                    .remote_workspace_folder()
 148                    .map(|path| path.display().to_string())
 149                    .unwrap_or_default()
 150                    .replace('\\', "/"),
 151            )
 152            .replace(
 153                "${localWorkspaceFolder}",
 154                &self.local_workspace_folder().replace('\\', "/"),
 155            );
 156        for (k, v) in &self.local_environment {
 157            let find = format!("${{localEnv:{k}}}");
 158            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
 159        }
 160
 161        Ok(replaced_content)
 162    }
 163
 164    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 165        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 166        let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
 167
 168        self.config = ConfigStatus::VariableParsed(parsed_config);
 169
 170        Ok(())
 171    }
 172
 173    fn runtime_remote_env(
 174        &self,
 175        container_env: &HashMap<String, String>,
 176    ) -> Result<HashMap<String, String>, DevContainerError> {
 177        let mut merged_remote_env = container_env.clone();
 178        // HOME is user-specific, and we will often not run as the image user
 179        merged_remote_env.remove("HOME");
 180        if let Some(remote_env) = self.dev_container().remote_env.clone() {
 181            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
 182                log::error!(
 183                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
 184                    remote_env
 185                );
 186                DevContainerError::DevContainerParseFailed
 187            })?;
 188            for (k, v) in container_env {
 189                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
 190            }
 191            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
 192                .map_err(|e| {
 193                    log::error!(
 194                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
 195                        &raw
 196                    );
 197                    DevContainerError::DevContainerParseFailed
 198                })?;
 199            for (k, v) in reserialized {
 200                merged_remote_env.insert(k, v);
 201            }
 202        }
 203        Ok(merged_remote_env)
 204    }
 205
 206    fn config_file(&self) -> PathBuf {
 207        self.config_directory.join(&self.file_name)
 208    }
 209
 210    fn dev_container(&self) -> &DevContainer {
 211        match &self.config {
 212            ConfigStatus::Deserialized(dev_container) => dev_container,
 213            ConfigStatus::VariableParsed(dev_container) => dev_container,
 214        }
 215    }
 216
 217    async fn dockerfile_location(&self) -> Option<PathBuf> {
 218        let dev_container = self.dev_container();
 219        match dev_container.build_type() {
 220            DevContainerBuildType::Image => None,
 221            DevContainerBuildType::Dockerfile => dev_container
 222                .build
 223                .as_ref()
 224                .map(|build| self.config_directory.join(&build.dockerfile)),
 225            DevContainerBuildType::DockerCompose => {
 226                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 227                    return None;
 228                };
 229                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 230                else {
 231                    return None;
 232                };
 233                main_service
 234                    .build
 235                    .and_then(|b| b.dockerfile)
 236                    .map(|dockerfile| self.config_directory.join(dockerfile))
 237            }
 238            DevContainerBuildType::None => None,
 239        }
 240    }
 241
 242    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 243        let mut hasher = DefaultHasher::new();
 244        let prefix = match &self.dev_container().name {
 245            Some(name) => &safe_id_lower(name),
 246            None => "zed-dc",
 247        };
 248        let prefix = prefix.get(..6).unwrap_or(prefix);
 249
 250        dockerfile_build_path.hash(&mut hasher);
 251
 252        let hash = hasher.finish();
 253        format!("{}-{:x}-features", prefix, hash)
 254    }
 255
 256    /// Gets the base image from the devcontainer with the following precedence:
 257    /// - The devcontainer image if an image is specified
 258    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 259    /// - The image sourced in the docker-compose main service, if one is specified
 260    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 261    /// If no such image is available, return an error
 262    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 263        if let Some(image) = &self.dev_container().image {
 264            return Ok(image.to_string());
 265        }
 266        if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
 267            let dockerfile_contents = self
 268                .fs
 269                .load(&self.config_directory.join(dockerfile))
 270                .await
 271                .map_err(|e| {
 272                    log::error!("Error reading dockerfile: {e}");
 273                    DevContainerError::DevContainerParseFailed
 274                })?;
 275            return image_from_dockerfile(self, dockerfile_contents);
 276        }
 277        if self.dev_container().docker_compose_file.is_some() {
 278            let docker_compose_manifest = self.docker_compose_manifest().await?;
 279            let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 280
 281            if let Some(dockerfile) = main_service
 282                .build
 283                .as_ref()
 284                .and_then(|b| b.dockerfile.as_ref())
 285            {
 286                let dockerfile_contents = self
 287                    .fs
 288                    .load(&self.config_directory.join(dockerfile))
 289                    .await
 290                    .map_err(|e| {
 291                        log::error!("Error reading dockerfile: {e}");
 292                        DevContainerError::DevContainerParseFailed
 293                    })?;
 294                return image_from_dockerfile(self, dockerfile_contents);
 295            }
 296            if let Some(image) = &main_service.image {
 297                return Ok(image.to_string());
 298            }
 299
 300            log::error!("No valid base image found in docker-compose configuration");
 301            return Err(DevContainerError::DevContainerParseFailed);
 302        }
 303        log::error!("No valid base image found in dev container configuration");
 304        Err(DevContainerError::DevContainerParseFailed)
 305    }
 306
    /// Downloads every configured OCI feature into a temporary build context
    /// and writes the extended Dockerfile that layers those features onto the
    /// base image. On success, populates `self.root_image`,
    /// `self.features_build_info`, and `self.features`.
    ///
    /// Requires that `parse_nonremote_vars` has already run (feature refs and
    /// file contents may contain variable substitutions).
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image up front; its metadata drives
        // user resolution below.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch directory so successive builds don't collide.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // The tag hashes the Dockerfile path, which embeds the timestamp, so
        // each build gets a distinct features image tag.
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // No `features` key behaves like an empty feature map.
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Builtin env file recording the resolved users; copied into the
        // image by the extended Dockerfile.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Install order honors any explicit override from the config.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A literal `false` value disables the feature entirely.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Suffix with the install index so repeated feature ids get
            // distinct directories.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI registry references are supported as feature sources.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Token -> manifest -> tarball: the standard OCI pull sequence.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Take the digest of the first layer; a manifest with no layers
            // is treated as a fetch failure.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // The extracted tarball must contain the feature's own manifest.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature manifests may themselves use variable substitutions.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the feature's env file, then an install wrapper script
            // generated from the ref, id, and that env content.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        // BuildKit is always used for non-compose builds; compose builds use
        // it only when the client reports support.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // The user's own Dockerfile (if any) is embedded into the extended one.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 542
    /// Renders the "extended" Dockerfile: the user's Dockerfile (if any)
    /// followed by normalization stages and one install layer per feature.
    ///
    /// `container_user`/`remote_user` feed the passwd lookups that record
    /// home directories; `use_buildkit` switches between BuildKit build
    /// contexts and a plain staging image for feature content.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is never attempted on Windows hosts.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per downloaded feature, concatenated in
        // install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell commands whose passwd-format output is split with
        // `cut -d: -f6` below to extract each user's home directory.
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // The user's Dockerfile needs a named final stage we can FROM;
        // inject an alias when it doesn't already define one.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content comes from an extra image stage
        // rather than a build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            // Feature-declared containerEnv entries become ENV layers here.
            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // Followed by the config's own containerEnv entries.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 644
 645    fn build_merged_resources(
 646        &self,
 647        base_image: DockerInspect,
 648    ) -> Result<DockerBuildResources, DevContainerError> {
 649        let dev_container = match &self.config {
 650            ConfigStatus::Deserialized(_) => {
 651                log::error!(
 652                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 653                );
 654                return Err(DevContainerError::DevContainerParseFailed);
 655            }
 656            ConfigStatus::VariableParsed(dev_container) => dev_container,
 657        };
 658        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 659
 660        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 661
 662        mounts.append(&mut feature_mounts);
 663
 664        let privileged = dev_container.privileged.unwrap_or(false)
 665            || self.features.iter().any(|f| f.privileged());
 666
 667        let mut entrypoint_script_lines = vec![
 668            "echo Container started".to_string(),
 669            "trap \"exit 0\" 15".to_string(),
 670        ];
 671
 672        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 673            entrypoint_script_lines.push(entrypoint.clone());
 674        }
 675        entrypoint_script_lines.append(&mut vec![
 676            "exec \"$@\"".to_string(),
 677            "while sleep 1 & wait $!; do :; done".to_string(),
 678        ]);
 679
 680        Ok(DockerBuildResources {
 681            image: base_image,
 682            additional_mounts: mounts,
 683            privileged,
 684            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 685        })
 686    }
 687
 688    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 689        if let ConfigStatus::Deserialized(_) = &self.config {
 690            log::error!(
 691                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 692            );
 693            return Err(DevContainerError::DevContainerParseFailed);
 694        }
 695        let dev_container = self.dev_container();
 696        match dev_container.build_type() {
 697            DevContainerBuildType::Image => {
 698                let built_docker_image = self.build_docker_image().await?;
 699                let Some(base_image) = dev_container.image.as_ref() else {
 700                    log::error!("Dev container is using and image which can't be referenced");
 701                    return Err(DevContainerError::DevContainerParseFailed);
 702                };
 703                let built_docker_image = self
 704                    .update_remote_user_uid(built_docker_image, base_image)
 705                    .await?;
 706
 707                let resources = self.build_merged_resources(built_docker_image)?;
 708                Ok(DevContainerBuildResources::Docker(resources))
 709            }
 710            DevContainerBuildType::Dockerfile => {
 711                let built_docker_image = self.build_docker_image().await?;
 712                let Some(features_build_info) = &self.features_build_info else {
 713                    log::error!(
 714                        "Can't attempt to build update UID dockerfile before initial docker build"
 715                    );
 716                    return Err(DevContainerError::DevContainerParseFailed);
 717                };
 718                let built_docker_image = self
 719                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 720                    .await?;
 721
 722                let resources = self.build_merged_resources(built_docker_image)?;
 723                Ok(DevContainerBuildResources::Docker(resources))
 724            }
 725            DevContainerBuildType::DockerCompose => {
 726                log::debug!("Using docker compose. Building extended compose files");
 727                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 728
 729                return Ok(DevContainerBuildResources::DockerCompose(
 730                    docker_compose_resources,
 731                ));
 732            }
 733            DevContainerBuildType::None => {
 734                return Err(DevContainerError::DevContainerParseFailed);
 735            }
 736        }
 737    }
 738
 739    async fn run_dev_container(
 740        &self,
 741        build_resources: DevContainerBuildResources,
 742    ) -> Result<DevContainerUp, DevContainerError> {
 743        let ConfigStatus::VariableParsed(_) = &self.config else {
 744            log::error!(
 745                "Variables have not been parsed; cannot proceed with running the dev container"
 746            );
 747            return Err(DevContainerError::DevContainerParseFailed);
 748        };
 749        let running_container = match build_resources {
 750            DevContainerBuildResources::DockerCompose(resources) => {
 751                self.run_docker_compose(resources).await?
 752            }
 753            DevContainerBuildResources::Docker(resources) => {
 754                self.run_docker_image(resources).await?
 755            }
 756        };
 757
 758        let remote_user = get_remote_user_from_config(&running_container, self)?;
 759        let remote_workspace_folder = get_remote_dir_from_config(
 760            &running_container,
 761            (&self.local_project_directory.display()).to_string(),
 762        )?;
 763
 764        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 765
 766        Ok(DevContainerUp {
 767            container_id: running_container.id,
 768            remote_user,
 769            remote_workspace_folder,
 770            extension_ids: self.extension_ids(),
 771            remote_env,
 772        })
 773    }
 774
 775    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 776        let dev_container = match &self.config {
 777            ConfigStatus::Deserialized(_) => {
 778                log::error!(
 779                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 780                );
 781                return Err(DevContainerError::DevContainerParseFailed);
 782            }
 783            ConfigStatus::VariableParsed(dev_container) => dev_container,
 784        };
 785        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 786            return Err(DevContainerError::DevContainerParseFailed);
 787        };
 788        let docker_compose_full_paths = docker_compose_files
 789            .iter()
 790            .map(|relative| self.config_directory.join(relative))
 791            .collect::<Vec<PathBuf>>();
 792
 793        let Some(config) = self
 794            .docker_client
 795            .get_docker_compose_config(&docker_compose_full_paths)
 796            .await?
 797        else {
 798            log::error!("Output could not deserialize into DockerComposeConfig");
 799            return Err(DevContainerError::DevContainerParseFailed);
 800        };
 801        Ok(DockerComposeResources {
 802            files: docker_compose_full_paths,
 803            config,
 804        })
 805    }
 806
 807    async fn build_and_extend_compose_files(
 808        &self,
 809    ) -> Result<DockerComposeResources, DevContainerError> {
 810        let dev_container = match &self.config {
 811            ConfigStatus::Deserialized(_) => {
 812                log::error!(
 813                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
 814                );
 815                return Err(DevContainerError::DevContainerParseFailed);
 816            }
 817            ConfigStatus::VariableParsed(dev_container) => dev_container,
 818        };
 819
 820        let Some(features_build_info) = &self.features_build_info else {
 821            log::error!(
 822                "Cannot build and extend compose files: features build info is not yet constructed"
 823            );
 824            return Err(DevContainerError::DevContainerParseFailed);
 825        };
 826        let mut docker_compose_resources = self.docker_compose_manifest().await?;
 827        let supports_buildkit = self.docker_client.supports_compose_buildkit();
 828
 829        let (main_service_name, main_service) =
 830            find_primary_service(&docker_compose_resources, self)?;
 831        let (built_service_image, built_service_image_tag) = if main_service
 832            .build
 833            .as_ref()
 834            .map(|b| b.dockerfile.as_ref())
 835            .is_some()
 836        {
 837            if !supports_buildkit {
 838                self.build_feature_content_image().await?;
 839            }
 840
 841            let dockerfile_path = &features_build_info.dockerfile_path;
 842
 843            let build_args = if !supports_buildkit {
 844                HashMap::from([
 845                    (
 846                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 847                        "dev_container_auto_added_stage_label".to_string(),
 848                    ),
 849                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 850                ])
 851            } else {
 852                HashMap::from([
 853                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 854                    (
 855                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 856                        "dev_container_auto_added_stage_label".to_string(),
 857                    ),
 858                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 859                ])
 860            };
 861
 862            let additional_contexts = if !supports_buildkit {
 863                None
 864            } else {
 865                Some(HashMap::from([(
 866                    "dev_containers_feature_content_source".to_string(),
 867                    features_build_info
 868                        .features_content_dir
 869                        .display()
 870                        .to_string(),
 871                )]))
 872            };
 873
 874            let build_override = DockerComposeConfig {
 875                name: None,
 876                services: HashMap::from([(
 877                    main_service_name.clone(),
 878                    DockerComposeService {
 879                        image: Some(features_build_info.image_tag.clone()),
 880                        entrypoint: None,
 881                        cap_add: None,
 882                        security_opt: None,
 883                        labels: None,
 884                        build: Some(DockerComposeServiceBuild {
 885                            context: Some(
 886                                features_build_info.empty_context_dir.display().to_string(),
 887                            ),
 888                            dockerfile: Some(dockerfile_path.display().to_string()),
 889                            args: Some(build_args),
 890                            additional_contexts,
 891                        }),
 892                        volumes: Vec::new(),
 893                        ..Default::default()
 894                    },
 895                )]),
 896                volumes: HashMap::new(),
 897            };
 898
 899            let temp_base = std::env::temp_dir().join("devcontainer-zed");
 900            let config_location = temp_base.join("docker_compose_build.json");
 901
 902            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 903                log::error!("Error serializing docker compose runtime override: {e}");
 904                DevContainerError::DevContainerParseFailed
 905            })?;
 906
 907            self.fs
 908                .write(&config_location, config_json.as_bytes())
 909                .await
 910                .map_err(|e| {
 911                    log::error!("Error writing the runtime override file: {e}");
 912                    DevContainerError::FilesystemError
 913                })?;
 914
 915            docker_compose_resources.files.push(config_location);
 916
 917            self.docker_client
 918                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
 919                .await?;
 920            (
 921                self.docker_client
 922                    .inspect(&features_build_info.image_tag)
 923                    .await?,
 924                &features_build_info.image_tag,
 925            )
 926        } else if let Some(image) = &main_service.image {
 927            if dev_container
 928                .features
 929                .as_ref()
 930                .is_none_or(|features| features.is_empty())
 931            {
 932                (self.docker_client.inspect(image).await?, image)
 933            } else {
 934                if !supports_buildkit {
 935                    self.build_feature_content_image().await?;
 936                }
 937
 938                let dockerfile_path = &features_build_info.dockerfile_path;
 939
 940                let build_args = if !supports_buildkit {
 941                    HashMap::from([
 942                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 943                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 944                    ])
 945                } else {
 946                    HashMap::from([
 947                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 948                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
 949                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 950                    ])
 951                };
 952
 953                let additional_contexts = if !supports_buildkit {
 954                    None
 955                } else {
 956                    Some(HashMap::from([(
 957                        "dev_containers_feature_content_source".to_string(),
 958                        features_build_info
 959                            .features_content_dir
 960                            .display()
 961                            .to_string(),
 962                    )]))
 963                };
 964
 965                let build_override = DockerComposeConfig {
 966                    name: None,
 967                    services: HashMap::from([(
 968                        main_service_name.clone(),
 969                        DockerComposeService {
 970                            image: Some(features_build_info.image_tag.clone()),
 971                            entrypoint: None,
 972                            cap_add: None,
 973                            security_opt: None,
 974                            labels: None,
 975                            build: Some(DockerComposeServiceBuild {
 976                                context: Some(
 977                                    features_build_info.empty_context_dir.display().to_string(),
 978                                ),
 979                                dockerfile: Some(dockerfile_path.display().to_string()),
 980                                args: Some(build_args),
 981                                additional_contexts,
 982                            }),
 983                            volumes: Vec::new(),
 984                            ..Default::default()
 985                        },
 986                    )]),
 987                    volumes: HashMap::new(),
 988                };
 989
 990                let temp_base = std::env::temp_dir().join("devcontainer-zed");
 991                let config_location = temp_base.join("docker_compose_build.json");
 992
 993                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 994                    log::error!("Error serializing docker compose runtime override: {e}");
 995                    DevContainerError::DevContainerParseFailed
 996                })?;
 997
 998                self.fs
 999                    .write(&config_location, config_json.as_bytes())
1000                    .await
1001                    .map_err(|e| {
1002                        log::error!("Error writing the runtime override file: {e}");
1003                        DevContainerError::FilesystemError
1004                    })?;
1005
1006                docker_compose_resources.files.push(config_location);
1007
1008                self.docker_client
1009                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1010                    .await?;
1011
1012                (
1013                    self.docker_client
1014                        .inspect(&features_build_info.image_tag)
1015                        .await?,
1016                    &features_build_info.image_tag,
1017                )
1018            }
1019        } else {
1020            log::error!("Docker compose must have either image or dockerfile defined");
1021            return Err(DevContainerError::DevContainerParseFailed);
1022        };
1023
1024        let built_service_image = self
1025            .update_remote_user_uid(built_service_image, built_service_image_tag)
1026            .await?;
1027
1028        let resources = self.build_merged_resources(built_service_image)?;
1029
1030        let network_mode = main_service.network_mode.as_ref();
1031        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1032        let runtime_override_file = self
1033            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1034            .await?;
1035
1036        docker_compose_resources.files.push(runtime_override_file);
1037
1038        Ok(docker_compose_resources)
1039    }
1040
1041    async fn write_runtime_override_file(
1042        &self,
1043        main_service_name: &str,
1044        network_mode_service: Option<&str>,
1045        resources: DockerBuildResources,
1046    ) -> Result<PathBuf, DevContainerError> {
1047        let config =
1048            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1049        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1050        let config_location = temp_base.join("docker_compose_runtime.json");
1051
1052        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1053            log::error!("Error serializing docker compose runtime override: {e}");
1054            DevContainerError::DevContainerParseFailed
1055        })?;
1056
1057        self.fs
1058            .write(&config_location, config_json.as_bytes())
1059            .await
1060            .map_err(|e| {
1061                log::error!("Error writing the runtime override file: {e}");
1062                DevContainerError::FilesystemError
1063            })?;
1064
1065        Ok(config_location)
1066    }
1067
1068    fn build_runtime_override(
1069        &self,
1070        main_service_name: &str,
1071        network_mode_service: Option<&str>,
1072        resources: DockerBuildResources,
1073    ) -> Result<DockerComposeConfig, DevContainerError> {
1074        let mut runtime_labels = HashMap::new();
1075
1076        if let Some(metadata) = &resources.image.config.labels.metadata {
1077            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1078                log::error!("Error serializing docker image metadata: {e}");
1079                DevContainerError::ContainerNotValid(resources.image.id.clone())
1080            })?;
1081
1082            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1083        }
1084
1085        for (k, v) in self.identifying_labels() {
1086            runtime_labels.insert(k.to_string(), v.to_string());
1087        }
1088
1089        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1090            .additional_mounts
1091            .iter()
1092            .filter_map(|mount| {
1093                if let Some(mount_type) = &mount.mount_type
1094                    && mount_type.to_lowercase() == "volume"
1095                {
1096                    Some((
1097                        mount.source.clone(),
1098                        DockerComposeVolume {
1099                            name: mount.source.clone(),
1100                        },
1101                    ))
1102                } else {
1103                    None
1104                }
1105            })
1106            .collect();
1107
1108        let volumes: Vec<MountDefinition> = resources
1109            .additional_mounts
1110            .iter()
1111            .map(|v| MountDefinition {
1112                source: v.source.clone(),
1113                target: v.target.clone(),
1114                mount_type: v.mount_type.clone(),
1115            })
1116            .collect();
1117
1118        let mut main_service = DockerComposeService {
1119            entrypoint: Some(vec![
1120                "/bin/sh".to_string(),
1121                "-c".to_string(),
1122                resources.entrypoint_script,
1123                "-".to_string(),
1124            ]),
1125            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1126            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1127            labels: Some(runtime_labels),
1128            volumes,
1129            privileged: Some(resources.privileged),
1130            ..Default::default()
1131        };
1132        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1133        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1134        if let Some(forward_ports) = &self.dev_container().forward_ports {
1135            let main_service_ports: Vec<String> = forward_ports
1136                .iter()
1137                .filter_map(|f| match f {
1138                    ForwardPort::Number(port) => Some(port.to_string()),
1139                    ForwardPort::String(port) => {
1140                        let parts: Vec<&str> = port.split(":").collect();
1141                        if parts.len() <= 1 {
1142                            Some(port.to_string())
1143                        } else if parts.len() == 2 {
1144                            if parts[0] == main_service_name {
1145                                Some(parts[1].to_string())
1146                            } else {
1147                                None
1148                            }
1149                        } else {
1150                            None
1151                        }
1152                    }
1153                })
1154                .collect();
1155            for port in main_service_ports {
1156                // If the main service uses a different service's network bridge, append to that service's ports instead
1157                if let Some(network_service_name) = network_mode_service {
1158                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1159                        service.ports.push(DockerComposeServicePort {
1160                            target: port.clone(),
1161                            published: port.clone(),
1162                            ..Default::default()
1163                        });
1164                    } else {
1165                        service_declarations.insert(
1166                            network_service_name.to_string(),
1167                            DockerComposeService {
1168                                ports: vec![DockerComposeServicePort {
1169                                    target: port.clone(),
1170                                    published: port.clone(),
1171                                    ..Default::default()
1172                                }],
1173                                ..Default::default()
1174                            },
1175                        );
1176                    }
1177                } else {
1178                    main_service.ports.push(DockerComposeServicePort {
1179                        target: port.clone(),
1180                        published: port.clone(),
1181                        ..Default::default()
1182                    });
1183                }
1184            }
1185            let other_service_ports: Vec<(&str, &str)> = forward_ports
1186                .iter()
1187                .filter_map(|f| match f {
1188                    ForwardPort::Number(_) => None,
1189                    ForwardPort::String(port) => {
1190                        let parts: Vec<&str> = port.split(":").collect();
1191                        if parts.len() != 2 {
1192                            None
1193                        } else {
1194                            if parts[0] == main_service_name {
1195                                None
1196                            } else {
1197                                Some((parts[0], parts[1]))
1198                            }
1199                        }
1200                    }
1201                })
1202                .collect();
1203            for (service_name, port) in other_service_ports {
1204                if let Some(service) = service_declarations.get_mut(service_name) {
1205                    service.ports.push(DockerComposeServicePort {
1206                        target: port.to_string(),
1207                        published: port.to_string(),
1208                        ..Default::default()
1209                    });
1210                } else {
1211                    service_declarations.insert(
1212                        service_name.to_string(),
1213                        DockerComposeService {
1214                            ports: vec![DockerComposeServicePort {
1215                                target: port.to_string(),
1216                                published: port.to_string(),
1217                                ..Default::default()
1218                            }],
1219                            ..Default::default()
1220                        },
1221                    );
1222                }
1223            }
1224        }
1225        if let Some(port) = &self.dev_container().app_port {
1226            if let Some(network_service_name) = network_mode_service {
1227                if let Some(service) = service_declarations.get_mut(network_service_name) {
1228                    service.ports.push(DockerComposeServicePort {
1229                        target: port.clone(),
1230                        published: port.clone(),
1231                        ..Default::default()
1232                    });
1233                } else {
1234                    service_declarations.insert(
1235                        network_service_name.to_string(),
1236                        DockerComposeService {
1237                            ports: vec![DockerComposeServicePort {
1238                                target: port.clone(),
1239                                published: port.clone(),
1240                                ..Default::default()
1241                            }],
1242                            ..Default::default()
1243                        },
1244                    );
1245                }
1246            } else {
1247                main_service.ports.push(DockerComposeServicePort {
1248                    target: port.clone(),
1249                    published: port.clone(),
1250                    ..Default::default()
1251                });
1252            }
1253        }
1254
1255        service_declarations.insert(main_service_name.to_string(), main_service);
1256        let new_docker_compose_config = DockerComposeConfig {
1257            name: None,
1258            services: service_declarations,
1259            volumes: config_volumes,
1260        };
1261
1262        Ok(new_docker_compose_config)
1263    }
1264
1265    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1266        let dev_container = match &self.config {
1267            ConfigStatus::Deserialized(_) => {
1268                log::error!(
1269                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1270                );
1271                return Err(DevContainerError::DevContainerParseFailed);
1272            }
1273            ConfigStatus::VariableParsed(dev_container) => dev_container,
1274        };
1275
1276        match dev_container.build_type() {
1277            DevContainerBuildType::Image => {
1278                let Some(image_tag) = &dev_container.image else {
1279                    return Err(DevContainerError::DevContainerParseFailed);
1280                };
1281                let base_image = self.docker_client.inspect(image_tag).await?;
1282                if dev_container
1283                    .features
1284                    .as_ref()
1285                    .is_none_or(|features| features.is_empty())
1286                {
1287                    log::debug!("No features to add. Using base image");
1288                    return Ok(base_image);
1289                }
1290            }
1291            DevContainerBuildType::Dockerfile => {}
1292            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1293                return Err(DevContainerError::DevContainerParseFailed);
1294            }
1295        };
1296
1297        let mut command = self.create_docker_build()?;
1298
1299        let output = self
1300            .command_runner
1301            .run_command(&mut command)
1302            .await
1303            .map_err(|e| {
1304                log::error!("Error building docker image: {e}");
1305                DevContainerError::CommandFailed(command.get_program().display().to_string())
1306            })?;
1307
1308        if !output.status.success() {
1309            let stderr = String::from_utf8_lossy(&output.stderr);
1310            log::error!("docker buildx build failed: {stderr}");
1311            return Err(DevContainerError::CommandFailed(
1312                command.get_program().display().to_string(),
1313            ));
1314        }
1315
1316        // After a successful build, inspect the newly tagged image to get its metadata
1317        let Some(features_build_info) = &self.features_build_info else {
1318            log::error!("Features build info expected, but not created");
1319            return Err(DevContainerError::DevContainerParseFailed);
1320        };
1321        let image = self
1322            .docker_client
1323            .inspect(&features_build_info.image_tag)
1324            .await?;
1325
1326        Ok(image)
1327    }
1328
    /// No-op on Windows hosts: UID/GID remapping is a Unix concern (bind-mount
    /// file ownership), so the inspected image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1337    #[cfg(not(target_os = "windows"))]
1338    async fn update_remote_user_uid(
1339        &self,
1340        image: DockerInspect,
1341        base_image: &str,
1342    ) -> Result<DockerInspect, DevContainerError> {
1343        let dev_container = self.dev_container();
1344
1345        let Some(features_build_info) = &self.features_build_info else {
1346            return Ok(image);
1347        };
1348
1349        // updateRemoteUserUID defaults to true per the devcontainers spec
1350        if dev_container.update_remote_user_uid == Some(false) {
1351            return Ok(image);
1352        }
1353
1354        let remote_user = get_remote_user_from_config(&image, self)?;
1355        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1356            return Ok(image);
1357        }
1358
1359        let image_user = image
1360            .config
1361            .image_user
1362            .as_deref()
1363            .unwrap_or("root")
1364            .to_string();
1365
1366        let host_uid = Command::new("id")
1367            .arg("-u")
1368            .output()
1369            .await
1370            .map_err(|e| {
1371                log::error!("Failed to get host UID: {e}");
1372                DevContainerError::CommandFailed("id -u".to_string())
1373            })
1374            .and_then(|output| {
1375                String::from_utf8_lossy(&output.stdout)
1376                    .trim()
1377                    .parse::<u32>()
1378                    .map_err(|e| {
1379                        log::error!("Failed to parse host UID: {e}");
1380                        DevContainerError::CommandFailed("id -u".to_string())
1381                    })
1382            })?;
1383
1384        let host_gid = Command::new("id")
1385            .arg("-g")
1386            .output()
1387            .await
1388            .map_err(|e| {
1389                log::error!("Failed to get host GID: {e}");
1390                DevContainerError::CommandFailed("id -g".to_string())
1391            })
1392            .and_then(|output| {
1393                String::from_utf8_lossy(&output.stdout)
1394                    .trim()
1395                    .parse::<u32>()
1396                    .map_err(|e| {
1397                        log::error!("Failed to parse host GID: {e}");
1398                        DevContainerError::CommandFailed("id -g".to_string())
1399                    })
1400            })?;
1401
1402        let dockerfile_content = self.generate_update_uid_dockerfile();
1403
1404        let dockerfile_path = features_build_info
1405            .features_content_dir
1406            .join("updateUID.Dockerfile");
1407        self.fs
1408            .write(&dockerfile_path, dockerfile_content.as_bytes())
1409            .await
1410            .map_err(|e| {
1411                log::error!("Failed to write updateUID Dockerfile: {e}");
1412                DevContainerError::FilesystemError
1413            })?;
1414
1415        let updated_image_tag = format!("{}-uid", features_build_info.image_tag);
1416
1417        let mut command = Command::new(self.docker_client.docker_cli());
1418        command.args(["build"]);
1419        command.args(["-f", &dockerfile_path.display().to_string()]);
1420        command.args(["-t", &updated_image_tag]);
1421        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1422        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1423        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1424        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1425        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1426        command.arg(features_build_info.empty_context_dir.display().to_string());
1427
1428        let output = self
1429            .command_runner
1430            .run_command(&mut command)
1431            .await
1432            .map_err(|e| {
1433                log::error!("Error building UID update image: {e}");
1434                DevContainerError::CommandFailed(command.get_program().display().to_string())
1435            })?;
1436
1437        if !output.status.success() {
1438            let stderr = String::from_utf8_lossy(&output.stderr);
1439            log::error!("UID update build failed: {stderr}");
1440            return Err(DevContainerError::CommandFailed(
1441                command.get_program().display().to_string(),
1442            ));
1443        }
1444
1445        self.docker_client.inspect(&updated_image_tag).await
1446    }
1447
    #[cfg(not(target_os = "windows"))]
    /// Generates the Dockerfile used to remap the remote user's UID/GID to the
    /// host user's, so bind-mounted files keep consistent ownership.
    ///
    /// The embedded shell script (run as root) parses `/etc/passwd`/`/etc/group`,
    /// skips the remap when UIDs already match or the target UID is taken by
    /// another user, frees a conflicting GID if needed, rewrites the passwd and
    /// group entries, and `chown`s the user's home folder. Build args:
    /// `BASE_IMAGE`, `REMOTE_USER`, `NEW_UID`, `NEW_GID`, `IMAGE_USER`.
    fn generate_update_uid_dockerfile(&self) -> String {
        let mut dockerfile = r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#.to_string();
        // Append each feature's containerEnv layer after the base content.
        for feature in &self.features {
            let container_env_layer = feature.generate_dockerfile_env();
            dockerfile = format!("{dockerfile}\n{container_env_layer}");
        }

        // Append ENV lines for the devcontainer's own containerEnv entries.
        // NOTE(review): this concatenates directly after the last feature env
        // layer with no separating newline — relies on generate_dockerfile_env()
        // ending its output with a newline; confirm against that helper.
        if let Some(env) = &self.dev_container().container_env {
            for (key, value) in env {
                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
            }
        }
        dockerfile
    }
1501
1502    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1503        let Some(features_build_info) = &self.features_build_info else {
1504            log::error!("Features build info not available for building feature content image");
1505            return Err(DevContainerError::DevContainerParseFailed);
1506        };
1507        let features_content_dir = &features_build_info.features_content_dir;
1508
1509        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1510        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1511
1512        self.fs
1513            .write(&dockerfile_path, dockerfile_content.as_bytes())
1514            .await
1515            .map_err(|e| {
1516                log::error!("Failed to write feature content Dockerfile: {e}");
1517                DevContainerError::FilesystemError
1518            })?;
1519
1520        let mut command = Command::new(self.docker_client.docker_cli());
1521        command.args([
1522            "build",
1523            "-t",
1524            "dev_container_feature_content_temp",
1525            "-f",
1526            &dockerfile_path.display().to_string(),
1527            &features_content_dir.display().to_string(),
1528        ]);
1529
1530        let output = self
1531            .command_runner
1532            .run_command(&mut command)
1533            .await
1534            .map_err(|e| {
1535                log::error!("Error building feature content image: {e}");
1536                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1537            })?;
1538
1539        if !output.status.success() {
1540            let stderr = String::from_utf8_lossy(&output.stderr);
1541            log::error!("Feature content image build failed: {stderr}");
1542            return Err(DevContainerError::CommandFailed(
1543                self.docker_client.docker_cli(),
1544            ));
1545        }
1546
1547        Ok(())
1548    }
1549
1550    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1551        let dev_container = match &self.config {
1552            ConfigStatus::Deserialized(_) => {
1553                log::error!(
1554                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1555                );
1556                return Err(DevContainerError::DevContainerParseFailed);
1557            }
1558            ConfigStatus::VariableParsed(dev_container) => dev_container,
1559        };
1560
1561        let Some(features_build_info) = &self.features_build_info else {
1562            log::error!(
1563                "Cannot create docker build command; features build info has not been constructed"
1564            );
1565            return Err(DevContainerError::DevContainerParseFailed);
1566        };
1567        let mut command = Command::new(self.docker_client.docker_cli());
1568
1569        command.args(["buildx", "build"]);
1570
1571        // --load is short for --output=docker, loading the built image into the local docker images
1572        command.arg("--load");
1573
1574        // BuildKit build context: provides the features content directory as a named context
1575        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1576        command.args([
1577            "--build-context",
1578            &format!(
1579                "dev_containers_feature_content_source={}",
1580                features_build_info.features_content_dir.display()
1581            ),
1582        ]);
1583
1584        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1585        if let Some(build_image) = &features_build_info.build_image {
1586            command.args([
1587                "--build-arg",
1588                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1589            ]);
1590        } else {
1591            command.args([
1592                "--build-arg",
1593                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1594            ]);
1595        }
1596
1597        command.args([
1598            "--build-arg",
1599            &format!(
1600                "_DEV_CONTAINERS_IMAGE_USER={}",
1601                self.root_image
1602                    .as_ref()
1603                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1604                    .unwrap_or(&"root".to_string())
1605            ),
1606        ]);
1607
1608        command.args([
1609            "--build-arg",
1610            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1611        ]);
1612
1613        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1614            for (key, value) in args {
1615                command.args(["--build-arg", &format!("{}={}", key, value)]);
1616            }
1617        }
1618
1619        command.args(["--target", "dev_containers_target_stage"]);
1620
1621        command.args([
1622            "-f",
1623            &features_build_info.dockerfile_path.display().to_string(),
1624        ]);
1625
1626        command.args(["-t", &features_build_info.image_tag]);
1627
1628        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
1629            command.arg(self.config_directory.display().to_string());
1630        } else {
1631            // Use an empty folder as the build context to avoid pulling in unneeded files.
1632            // The actual feature content is supplied via the BuildKit build context above.
1633            command.arg(features_build_info.empty_context_dir.display().to_string());
1634        }
1635
1636        Ok(command)
1637    }
1638
1639    async fn run_docker_compose(
1640        &self,
1641        resources: DockerComposeResources,
1642    ) -> Result<DockerInspect, DevContainerError> {
1643        let mut command = Command::new(self.docker_client.docker_cli());
1644        command.args(&["compose", "--project-name", &self.project_name()]);
1645        for docker_compose_file in resources.files {
1646            command.args(&["-f", &docker_compose_file.display().to_string()]);
1647        }
1648        command.args(&["up", "-d"]);
1649
1650        let output = self
1651            .command_runner
1652            .run_command(&mut command)
1653            .await
1654            .map_err(|e| {
1655                log::error!("Error running docker compose up: {e}");
1656                DevContainerError::CommandFailed(command.get_program().display().to_string())
1657            })?;
1658
1659        if !output.status.success() {
1660            let stderr = String::from_utf8_lossy(&output.stderr);
1661            log::error!("Non-success status from docker compose up: {}", stderr);
1662            return Err(DevContainerError::CommandFailed(
1663                command.get_program().display().to_string(),
1664            ));
1665        }
1666
1667        if let Some(docker_ps) = self.check_for_existing_container().await? {
1668            log::debug!("Found newly created dev container");
1669            return self.docker_client.inspect(&docker_ps.id).await;
1670        }
1671
1672        log::error!("Could not find existing container after docker compose up");
1673
1674        Err(DevContainerError::DevContainerParseFailed)
1675    }
1676
1677    async fn run_docker_image(
1678        &self,
1679        build_resources: DockerBuildResources,
1680    ) -> Result<DockerInspect, DevContainerError> {
1681        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1682
1683        let output = self
1684            .command_runner
1685            .run_command(&mut docker_run_command)
1686            .await
1687            .map_err(|e| {
1688                log::error!("Error running docker run: {e}");
1689                DevContainerError::CommandFailed(
1690                    docker_run_command.get_program().display().to_string(),
1691                )
1692            })?;
1693
1694        if !output.status.success() {
1695            let std_err = String::from_utf8_lossy(&output.stderr);
1696            log::error!("Non-success status from docker run. StdErr: {std_err}");
1697            return Err(DevContainerError::CommandFailed(
1698                docker_run_command.get_program().display().to_string(),
1699            ));
1700        }
1701
1702        log::debug!("Checking for container that was started");
1703        let Some(docker_ps) = self.check_for_existing_container().await? else {
1704            log::error!("Could not locate container just created");
1705            return Err(DevContainerError::DevContainerParseFailed);
1706        };
1707        self.docker_client.inspect(&docker_ps.id).await
1708    }
1709
1710    fn local_workspace_folder(&self) -> String {
1711        self.local_project_directory.display().to_string()
1712    }
1713    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1714        self.local_project_directory
1715            .file_name()
1716            .map(|f| f.display().to_string())
1717            .ok_or(DevContainerError::DevContainerParseFailed)
1718    }
1719
1720    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1721        self.dev_container()
1722            .workspace_folder
1723            .as_ref()
1724            .map(|folder| PathBuf::from(folder))
1725            .or(Some(
1726                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1727            ))
1728            .ok_or(DevContainerError::DevContainerParseFailed)
1729    }
1730    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1731        self.remote_workspace_folder().and_then(|f| {
1732            f.file_name()
1733                .map(|file_name| file_name.display().to_string())
1734                .ok_or(DevContainerError::DevContainerParseFailed)
1735        })
1736    }
1737
1738    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1739        if let Some(mount) = &self.dev_container().workspace_mount {
1740            return Ok(mount.clone());
1741        }
1742        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1743            return Err(DevContainerError::DevContainerParseFailed);
1744        };
1745
1746        Ok(MountDefinition {
1747            source: self.local_workspace_folder(),
1748            target: format!("/workspaces/{}", project_directory_name.display()),
1749            mount_type: None,
1750        })
1751    }
1752
    /// Builds the `docker run` command that starts a detached dev container
    /// from the built image: workspace + extra mounts, identifying labels,
    /// serialized image metadata, forwarded ports, and an `/bin/sh -c`
    /// entrypoint running the provided entrypoint script.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Podman-specific flags: disable SELinux labeling and keep the host
        // user's id inside the user namespace.
        if &docker_cli == "podman" {
            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
        }

        // Detached run; signals are not proxied to the container process.
        command.arg("--sig-proxy=false");
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to find this container again (see
        // check_for_existing_container).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata onto the container as a
        // JSON-serialized label.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only numeric forwardPorts entries map to -p here; other variants are
        // not handled by this command.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        if let Some(app_port) = &self.dev_container().app_port {
            command.arg("-p");
            command.arg(format!("{app_port}:{app_port}"));
        }

        // Override the entrypoint with a shell that runs the entrypoint script;
        // the trailing "-" becomes $0 for the `-c` script.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1822
1823    fn extension_ids(&self) -> Vec<String> {
1824        self.dev_container()
1825            .customizations
1826            .as_ref()
1827            .map(|c| c.zed.extensions.clone())
1828            .unwrap_or_default()
1829    }
1830
1831    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1832        self.run_initialize_commands().await?;
1833
1834        self.download_feature_and_dockerfile_resources().await?;
1835
1836        let build_resources = self.build_resources().await?;
1837
1838        let devcontainer_up = self.run_dev_container(build_resources).await?;
1839
1840        self.run_remote_scripts(&devcontainer_up, true).await?;
1841
1842        Ok(devcontainer_up)
1843    }
1844
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// `docker exec`, in spec order.
    ///
    /// When `new_container` is true the creation-time phases run first:
    /// `onCreateCommand` and `updateContentCommand` as root, then
    /// `postCreateCommand` and `postStartCommand` as the remote user.
    /// `postAttachCommand` runs on every call, new container or not.
    ///
    /// Fails with `DevContainerScriptsFailed` if variable expansion has not
    /// happened yet; individual script failures propagate from `run_docker_exec`.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // All scripts run with the remote workspace folder as their cwd.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // onCreateCommand: creation-only, runs as root.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // updateContentCommand: creation-only, runs as root.
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // postCreateCommand: creation-only, runs as the remote user.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // postStartCommand: creation-only here, runs as the remote user.
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand: runs on every attach, as the remote user.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1932
1933    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1934        let ConfigStatus::VariableParsed(config) = &self.config else {
1935            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1936            return Err(DevContainerError::DevContainerParseFailed);
1937        };
1938
1939        if let Some(initialize_command) = &config.initialize_command {
1940            log::debug!("Running initialize command");
1941            initialize_command
1942                .run(&self.command_runner, &self.local_project_directory)
1943                .await
1944        } else {
1945            log::warn!("No initialize command found");
1946            Ok(())
1947        }
1948    }
1949
1950    async fn check_for_existing_devcontainer(
1951        &self,
1952    ) -> Result<Option<DevContainerUp>, DevContainerError> {
1953        if let Some(docker_ps) = self.check_for_existing_container().await? {
1954            log::debug!("Dev container already found. Proceeding with it");
1955
1956            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1957
1958            if !docker_inspect.is_running() {
1959                log::debug!("Container not running. Will attempt to start, and then proceed");
1960                self.docker_client.start_container(&docker_ps.id).await?;
1961            }
1962
1963            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1964
1965            let remote_folder = get_remote_dir_from_config(
1966                &docker_inspect,
1967                (&self.local_project_directory.display()).to_string(),
1968            )?;
1969
1970            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1971
1972            let dev_container_up = DevContainerUp {
1973                container_id: docker_ps.id,
1974                remote_user: remote_user,
1975                remote_workspace_folder: remote_folder,
1976                extension_ids: self.extension_ids(),
1977                remote_env,
1978            };
1979
1980            self.run_remote_scripts(&dev_container_up, false).await?;
1981
1982            Ok(Some(dev_container_up))
1983        } else {
1984            log::debug!("Existing container not found.");
1985
1986            Ok(None)
1987        }
1988    }
1989
1990    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1991        self.docker_client
1992            .find_process_by_filters(
1993                self.identifying_labels()
1994                    .iter()
1995                    .map(|(k, v)| format!("label={k}={v}"))
1996                    .collect(),
1997            )
1998            .await
1999    }
2000
2001    fn project_name(&self) -> String {
2002        if let Some(name) = &self.dev_container().name {
2003            safe_id_lower(name)
2004        } else {
2005            let alternate_name = &self
2006                .local_workspace_base_name()
2007                .unwrap_or(self.local_workspace_folder());
2008            safe_id_lower(alternate_name)
2009        }
2010    }
2011}
2012
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// Consumed by `create_docker_build`; constructed elsewhere and stored on
/// `DevContainerManifest::features_build_info`.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    /// (keeps unrelated files out of the build when not building from a Dockerfile)
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` means the build targets the auto-added stage label instead
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2031
2032pub(crate) async fn read_devcontainer_configuration(
2033    config: DevContainerConfig,
2034    context: &DevContainerContext,
2035    environment: HashMap<String, String>,
2036) -> Result<DevContainer, DevContainerError> {
2037    let docker = if context.use_podman {
2038        Docker::new("podman")
2039    } else {
2040        Docker::new("docker")
2041    };
2042    let mut dev_container = DevContainerManifest::new(
2043        context,
2044        environment,
2045        Arc::new(docker),
2046        Arc::new(DefaultCommandRunner::new()),
2047        config,
2048        &context.project_directory.as_ref(),
2049    )
2050    .await?;
2051    dev_container.parse_nonremote_vars()?;
2052    Ok(dev_container.dev_container().clone())
2053}
2054
2055pub(crate) async fn spawn_dev_container(
2056    context: &DevContainerContext,
2057    environment: HashMap<String, String>,
2058    config: DevContainerConfig,
2059    local_project_path: &Path,
2060) -> Result<DevContainerUp, DevContainerError> {
2061    let docker = if context.use_podman {
2062        Docker::new("podman")
2063    } else {
2064        Docker::new("docker")
2065    };
2066    let mut devcontainer_manifest = DevContainerManifest::new(
2067        context,
2068        environment,
2069        Arc::new(docker),
2070        Arc::new(DefaultCommandRunner::new()),
2071        config,
2072        local_project_path,
2073    )
2074    .await?;
2075
2076    devcontainer_manifest.parse_nonremote_vars()?;
2077
2078    log::debug!("Checking for existing container");
2079    if let Some(devcontainer) = devcontainer_manifest
2080        .check_for_existing_devcontainer()
2081        .await?
2082    {
2083        Ok(devcontainer)
2084    } else {
2085        log::debug!("Existing container not found. Building");
2086
2087        devcontainer_manifest.build_and_run().await
2088    }
2089}
2090
/// Inputs needed to `docker run` a built dev container image
/// (see `create_docker_run_command`).
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect data for the image to run (id, labels, config).
    image: DockerInspect,
    // Extra `--mount` definitions beyond the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    // When true, `--privileged` is passed to `docker run`.
    privileged: bool,
    // Script executed by the container's `/bin/sh -c` entrypoint.
    entrypoint_script: String,
}
2098
/// The two ways a dev container is brought up: via `docker compose`, or a
/// direct `docker run` of a (possibly feature-extended) image.
#[derive(Debug)]
enum DevContainerBuildResources {
    // Compose-based config: compose files plus the merged compose configuration.
    DockerCompose(DockerComposeResources),
    // Single-image config: resources for a direct `docker run`.
    Docker(DockerBuildResources),
}
2104
2105fn find_primary_service(
2106    docker_compose: &DockerComposeResources,
2107    devcontainer: &DevContainerManifest,
2108) -> Result<(String, DockerComposeService), DevContainerError> {
2109    let Some(service_name) = &devcontainer.dev_container().service else {
2110        return Err(DevContainerError::DevContainerParseFailed);
2111    };
2112
2113    match docker_compose.config.services.get(service_name) {
2114        Some(service) => Ok((service_name.clone(), service.clone())),
2115        None => Err(DevContainerError::DevContainerParseFailed),
2116    }
2117}
2118
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
///
/// NOTE(review): distinct from the `/tmp/build-features/` path used when
/// packaging the feature content image — confirm the two are intentional.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2122
/// Escapes regex special characters in a string.
///
/// Each character in `.*+?^${}()|[]\` is prefixed with a backslash; every
/// other character passes through unchanged.
fn escape_regex_chars(input: &str) -> String {
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, ch| {
            if SPECIAL.contains(ch) {
                escaped.push('\\');
            }
            escaped.push(ch);
            escaped
        })
}
2134
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Strip a trailing `@digest` first; otherwise strip a `:version` suffix,
    // but only when the colon falls after the final slash (so a colon inside
    // the registry/path part is left alone).
    let without_version = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short id is the last path segment of what remains.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2158
2159/// Generates a shell command that looks up a user's passwd entry.
2160///
2161/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2162/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2163fn get_ent_passwd_shell_command(user: &str) -> String {
2164    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2165    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2166    format!(
2167        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2168        shell = escaped_for_shell,
2169        re = escaped_for_regex,
2170    )
2171}
2172
2173/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2174///
2175/// Features listed in the override come first (in the specified order), followed
2176/// by any remaining features sorted lexicographically by their full reference ID.
2177fn resolve_feature_order<'a>(
2178    features: &'a HashMap<String, FeatureOptions>,
2179    override_order: &Option<Vec<String>>,
2180) -> Vec<(&'a String, &'a FeatureOptions)> {
2181    if let Some(order) = override_order {
2182        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2183        for ordered_id in order {
2184            if let Some((key, options)) = features.get_key_value(ordered_id) {
2185                ordered.push((key, options));
2186            }
2187        }
2188        let mut remaining: Vec<_> = features
2189            .iter()
2190            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2191            .collect();
2192        remaining.sort_by_key(|(id, _)| id.as_str());
2193        ordered.extend(remaining);
2194        ordered
2195    } else {
2196        let mut entries: Vec<_> = features.iter().collect();
2197        entries.sort_by_key(|(id, _)| id.as_str());
2198        entries
2199    }
2200}
2201
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// The wrapper logs the feature's name/id/options, sources the built-in and
/// per-feature env files with `set -a` so they export, then executes the
/// feature's `install.sh`. All interpolated values are shell-quoted via
/// `shlex::try_quote` before being embedded.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Quote each interpolated value so it is safe inside the generated script.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent the option lines four spaces for the banner, dropping empties.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!("    {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : {escaped_name}'
echo 'Id            : {escaped_id}'
echo 'Options       :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2260
// TODO: move these Dockerfile-parsing helpers into their own module.
/// Returns the build-stage alias from the first `FROM` line of a Dockerfile
/// (e.g. `FROM ubuntu AS base` → `Some("base")`), or `None` when that line
/// declares no alias or no `FROM` line exists.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))
        .and_then(|line| {
            // `split_whitespace` tolerates tabs, runs of spaces, and trailing
            // whitespace, all of which the previous `split(" ")` mishandled
            // (a trailing space made a valid alias go undetected).
            let words: Vec<&str> = line.split_whitespace().collect();
            if words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as") {
                Some(words[words.len() - 1].to_string())
            } else {
                None
            }
        })
}
2275
2276fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2277    if dockerfile_alias(dockerfile_content).is_some() {
2278        dockerfile_content.to_string()
2279    } else {
2280        dockerfile_content
2281            .lines()
2282            .map(|line| {
2283                if line.starts_with("FROM") {
2284                    format!("{} AS {}", line, alias)
2285                } else {
2286                    line.to_string()
2287                }
2288            })
2289            .collect::<Vec<String>>()
2290            .join("\n")
2291    }
2292}
2293
2294fn image_from_dockerfile(
2295    devcontainer: &DevContainerManifest,
2296    dockerfile_contents: String,
2297) -> Result<String, DevContainerError> {
2298    let mut raw_contents = dockerfile_contents
2299        .lines()
2300        .find(|line| line.starts_with("FROM"))
2301        .and_then(|from_line| {
2302            from_line
2303                .split(' ')
2304                .collect::<Vec<&str>>()
2305                .get(1)
2306                .map(|s| s.to_string())
2307        })
2308        .ok_or_else(|| {
2309            log::error!("Could not find an image definition in dockerfile");
2310            DevContainerError::DevContainerParseFailed
2311        })?;
2312
2313    for (k, v) in devcontainer
2314        .dev_container()
2315        .build
2316        .as_ref()
2317        .and_then(|b| b.args.as_ref())
2318        .unwrap_or(&HashMap::new())
2319    {
2320        raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2321    }
2322    Ok(raw_contents)
2323}
2324
2325// Container user things
2326// This should come from spec - see the docs
2327fn get_remote_user_from_config(
2328    docker_config: &DockerInspect,
2329    devcontainer: &DevContainerManifest,
2330) -> Result<String, DevContainerError> {
2331    if let DevContainer {
2332        remote_user: Some(user),
2333        ..
2334    } = &devcontainer.dev_container()
2335    {
2336        return Ok(user.clone());
2337    }
2338    if let Some(metadata) = &docker_config.config.labels.metadata {
2339        for metadatum in metadata {
2340            if let Some(remote_user) = metadatum.get("remoteUser") {
2341                if let Some(remote_user_str) = remote_user.as_str() {
2342                    return Ok(remote_user_str.to_string());
2343                }
2344            }
2345        }
2346    }
2347    if let Some(image_user) = &docker_config.config.image_user {
2348        if !image_user.is_empty() {
2349            return Ok(image_user.to_string());
2350        }
2351    }
2352    Ok("root".to_string())
2353}
2354
2355// This should come from spec - see the docs
2356fn get_container_user_from_config(
2357    docker_config: &DockerInspect,
2358    devcontainer: &DevContainerManifest,
2359) -> Result<String, DevContainerError> {
2360    if let Some(user) = &devcontainer.dev_container().container_user {
2361        return Ok(user.to_string());
2362    }
2363    if let Some(metadata) = &docker_config.config.labels.metadata {
2364        for metadatum in metadata {
2365            if let Some(container_user) = metadatum.get("containerUser") {
2366                if let Some(container_user_str) = container_user.as_str() {
2367                    return Ok(container_user_str.to_string());
2368                }
2369            }
2370        }
2371    }
2372    if let Some(image_user) = &docker_config.config.image_user {
2373        return Ok(image_user.to_string());
2374    }
2375
2376    Ok("root".to_string())
2377}
2378
2379#[cfg(test)]
2380mod test {
2381    use std::{
2382        collections::HashMap,
2383        ffi::OsStr,
2384        path::PathBuf,
2385        process::{ExitStatus, Output},
2386        sync::{Arc, Mutex},
2387    };
2388
2389    use async_trait::async_trait;
2390    use fs::{FakeFs, Fs};
2391    use gpui::{AppContext, TestAppContext};
2392    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2393    use project::{
2394        ProjectEnvironment,
2395        worktree_store::{WorktreeIdCounter, WorktreeStore},
2396    };
2397    use serde_json_lenient::Value;
2398    use util::{command::Command, paths::SanitizedPath};
2399
2400    #[cfg(not(target_os = "windows"))]
2401    use crate::docker::DockerComposeServicePort;
2402    use crate::{
2403        DevContainerConfig, DevContainerContext,
2404        command_json::CommandRunner,
2405        devcontainer_api::DevContainerError,
2406        devcontainer_json::MountDefinition,
2407        devcontainer_manifest::{
2408            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2409            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2410        },
2411        docker::{
2412            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2413            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2414            DockerPs,
2415        },
2416        oci::TokenResponse,
2417    };
2418    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2419
    /// Builds an in-memory GNU tar archive from `(path, contents)` pairs.
    /// An entry whose `content` is empty is written as a directory rather
    /// than an empty file. All entries get mode 0755.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Directory entry: zero size and no data payload.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                // Finalize the checksum after all other header fields are set.
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                // Regular file entry carrying the literal bytes of `content`.
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                // Finalize the checksum after all other header fields are set.
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finish the archive and recover the raw bytes from the cursor.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2450
2451    fn test_project_filename() -> String {
2452        PathBuf::from(TEST_PROJECT_PATH)
2453            .file_name()
2454            .expect("is valid")
2455            .display()
2456            .to_string()
2457    }
2458
2459    async fn init_devcontainer_config(
2460        fs: &Arc<FakeFs>,
2461        devcontainer_contents: &str,
2462    ) -> DevContainerConfig {
2463        fs.insert_tree(
2464            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2465            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2466        )
2467        .await;
2468
2469        DevContainerConfig::default_config()
2470    }
2471
    /// Handles to the fake infrastructure backing a manifest under test, so
    /// individual tests can inspect or manipulate state after setup.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Kept alive for the duration of the test; not inspected directly.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2478
2479    async fn init_default_devcontainer_manifest(
2480        cx: &mut TestAppContext,
2481        devcontainer_contents: &str,
2482    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2483        let fs = FakeFs::new(cx.executor());
2484        let http_client = fake_http_client();
2485        let command_runner = Arc::new(TestCommandRunner::new());
2486        let docker = Arc::new(FakeDocker::new());
2487        let environment = HashMap::new();
2488
2489        init_devcontainer_manifest(
2490            cx,
2491            fs,
2492            http_client,
2493            docker,
2494            command_runner,
2495            environment,
2496            devcontainer_contents,
2497        )
2498        .await
2499    }
2500
    /// Builds a `DevContainerManifest` against fully faked dependencies:
    /// writes the given devcontainer.json to the fake fs, wires up a fake
    /// worktree/environment, and returns both the manifest and the fake
    /// handles (`TestDependencies`) for later inspection.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Minimal project scaffolding needed to produce a ProjectEnvironment.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Cloned handles are returned to the test so it can inspect the fakes
        // after the manifest has acted on them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2543
    /// devcontainer.json's `remoteUser` must take precedence over any
    /// `remoteUser` recorded in the image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // The image metadata advertises a different user; it must lose.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2583
    /// With no `remoteUser` in devcontainer.json, the user comes from the
    /// image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2612
    /// The feature id is the last path segment of an OCI reference (or local
    /// path), with any `:version` or `@sha256:...` suffix stripped.
    #[test]
    fn should_extract_feature_id_from_references() {
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
            "aws-cli"
        );
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/go"),
            "go"
        );
        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
            "rust"
        );
    }
2630
2631    #[gpui::test]
2632    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2633        let mut metadata = HashMap::new();
2634        metadata.insert(
2635            "remoteUser".to_string(),
2636            serde_json_lenient::Value::String("vsCode".to_string()),
2637        );
2638
2639        let (_, devcontainer_manifest) =
2640            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2641        let build_resources = DockerBuildResources {
2642            image: DockerInspect {
2643                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2644                config: DockerInspectConfig {
2645                    labels: DockerConfigLabels { metadata: None },
2646                    image_user: None,
2647                    env: Vec::new(),
2648                },
2649                mounts: None,
2650                state: None,
2651            },
2652            additional_mounts: vec![],
2653            privileged: false,
2654            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
2655        };
2656        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2657
2658        assert!(docker_run_command.is_ok());
2659        let docker_run_command = docker_run_command.expect("ok");
2660
2661        assert_eq!(docker_run_command.get_program(), "docker");
2662        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2663            .join(".devcontainer")
2664            .join("devcontainer.json");
2665        let expected_config_file_label = expected_config_file_label.display();
2666        assert_eq!(
2667            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2668            vec![
2669                OsStr::new("run"),
2670                OsStr::new("--sig-proxy=false"),
2671                OsStr::new("-d"),
2672                OsStr::new("--mount"),
2673                OsStr::new(
2674                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2675                ),
2676                OsStr::new("-l"),
2677                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2678                OsStr::new("-l"),
2679                OsStr::new(&format!(
2680                    "devcontainer.config_file={expected_config_file_label}"
2681                )),
2682                OsStr::new("--entrypoint"),
2683                OsStr::new("/bin/sh"),
2684                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2685                OsStr::new("-c"),
2686                OsStr::new(
2687                    "
2688    echo Container started
2689    trap \"exit 0\" 15
2690    exec \"$@\"
2691    while sleep 1 & wait $!; do :; done
2692                        "
2693                    .trim()
2694                ),
2695                OsStr::new("-"),
2696            ]
2697        )
2698    }
2699
    /// Primary-service resolution: errors when no service is named in the
    /// devcontainer, errors when the named service is missing from the
    /// compose config, and succeeds when both agree.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2759
    /// Variable substitution in devcontainer.json when no explicit workspace
    /// mount is declared: workspace-folder variables resolve against the
    /// default `/workspaces/<project>` mount, and `${localEnv:...}` resolves
    /// against the supplied local environment.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
                    "#;
        // Local environment backing the ${localEnv:...} substitutions below.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2872
    /// Variable substitution when the devcontainer declares an explicit
    /// `workspaceMount`/`workspaceFolder`: container-side folder variables
    /// resolve against the custom folder, while local-side variables keep
    /// resolving against the real project path.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
                // These are some external comments. serde_lenient should handle them
                {
                    // These are some internal comments
                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
                    "name": "myDevContainer-${devcontainerId}",
                    "remoteUser": "root",
                    "remoteEnv": {
                        "DEVCONTAINER_ID": "${devcontainerId}",
                        "MYVAR2": "myvarothervalue",
                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

                    },
                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
                    "workspaceFolder": "/workspace/customfolder"
                }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2959
2960    // updateRemoteUserUID is treated as false in Windows, so this test will fail
2961    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2962    #[cfg(not(target_os = "windows"))]
2963    #[gpui::test]
2964    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2965        cx.executor().allow_parking();
2966        env_logger::try_init().ok();
2967        let given_devcontainer_contents = r#"
2968            /*---------------------------------------------------------------------------------------------
2969             *  Copyright (c) Microsoft Corporation. All rights reserved.
2970             *  Licensed under the MIT License. See License.txt in the project root for license information.
2971             *--------------------------------------------------------------------------------------------*/
2972            {
2973              "name": "cli-${devcontainerId}",
2974              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2975              "build": {
2976                "dockerfile": "Dockerfile",
2977                "args": {
2978                  "VARIANT": "18-bookworm",
2979                  "FOO": "bar",
2980                },
2981              },
2982              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2983              "workspaceFolder": "/workspace2",
2984              "mounts": [
2985                // Keep command history across instances
2986                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2987              ],
2988
2989              "forwardPorts": [
2990                8082,
2991                8083,
2992              ],
2993              "appPort": "8084",
2994
2995              "containerEnv": {
2996                "VARIABLE_VALUE": "value",
2997              },
2998
2999              "initializeCommand": "touch IAM.md",
3000
3001              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3002
3003              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3004
3005              "postCreateCommand": {
3006                "yarn": "yarn install",
3007                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3008              },
3009
3010              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3011
3012              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3013
3014              "remoteUser": "node",
3015
3016              "remoteEnv": {
3017                "PATH": "${containerEnv:PATH}:/some/other/path",
3018                "OTHER_ENV": "other_env_value"
3019              },
3020
3021              "features": {
3022                "ghcr.io/devcontainers/features/docker-in-docker:2": {
3023                  "moby": false,
3024                },
3025                "ghcr.io/devcontainers/features/go:1": {},
3026              },
3027
3028              "customizations": {
3029                "vscode": {
3030                  "extensions": [
3031                    "dbaeumer.vscode-eslint",
3032                    "GitHub.vscode-pull-request-github",
3033                  ],
3034                },
3035                "zed": {
3036                  "extensions": ["vue", "ruby"],
3037                },
3038                "codespaces": {
3039                  "repositories": {
3040                    "devcontainers/features": {
3041                      "permissions": {
3042                        "contents": "write",
3043                        "workflows": "write",
3044                      },
3045                    },
3046                  },
3047                },
3048              },
3049            }
3050            "#;
3051
3052        let (test_dependencies, mut devcontainer_manifest) =
3053            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3054                .await
3055                .unwrap();
3056
3057        test_dependencies
3058            .fs
3059            .atomic_write(
3060                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3061                r#"
3062#  Copyright (c) Microsoft Corporation. All rights reserved.
3063#  Licensed under the MIT License. See License.txt in the project root for license information.
3064ARG VARIANT="16-bullseye"
3065FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3066
3067RUN mkdir -p /workspaces && chown node:node /workspaces
3068
3069ARG USERNAME=node
3070USER $USERNAME
3071
3072# Save command line history
3073RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3074&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3075&& mkdir -p /home/$USERNAME/commandhistory \
3076&& touch /home/$USERNAME/commandhistory/.bash_history \
3077&& chown -R $USERNAME /home/$USERNAME/commandhistory
3078                    "#.trim().to_string(),
3079            )
3080            .await
3081            .unwrap();
3082
3083        devcontainer_manifest.parse_nonremote_vars().unwrap();
3084
3085        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3086
3087        assert_eq!(
3088            devcontainer_up.extension_ids,
3089            vec!["vue".to_string(), "ruby".to_string()]
3090        );
3091
3092        let files = test_dependencies.fs.files();
3093        let feature_dockerfile = files
3094            .iter()
3095            .find(|f| {
3096                f.file_name()
3097                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3098            })
3099            .expect("to be found");
3100        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3101        assert_eq!(
3102            &feature_dockerfile,
3103            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3104
3105#  Copyright (c) Microsoft Corporation. All rights reserved.
3106#  Licensed under the MIT License. See License.txt in the project root for license information.
3107ARG VARIANT="16-bullseye"
3108FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3109
3110RUN mkdir -p /workspaces && chown node:node /workspaces
3111
3112ARG USERNAME=node
3113USER $USERNAME
3114
3115# Save command line history
3116RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3117&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3118&& mkdir -p /home/$USERNAME/commandhistory \
3119&& touch /home/$USERNAME/commandhistory/.bash_history \
3120&& chown -R $USERNAME /home/$USERNAME/commandhistory
3121
3122FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3123USER root
3124COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3125RUN chmod -R 0755 /tmp/build-features/
3126
3127FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3128
3129USER root
3130
3131RUN mkdir -p /tmp/dev-container-features
3132COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3133
3134RUN \
3135echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3136echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3137
3138
3139RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3140cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3141&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3142&& cd /tmp/dev-container-features/docker-in-docker_0 \
3143&& chmod +x ./devcontainer-features-install.sh \
3144&& ./devcontainer-features-install.sh \
3145&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3146
3147RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3148cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3149&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3150&& cd /tmp/dev-container-features/go_1 \
3151&& chmod +x ./devcontainer-features-install.sh \
3152&& ./devcontainer-features-install.sh \
3153&& rm -rf /tmp/dev-container-features/go_1
3154
3155
3156ARG _DEV_CONTAINERS_IMAGE_USER=root
3157USER $_DEV_CONTAINERS_IMAGE_USER
3158"#
3159        );
3160
3161        let uid_dockerfile = files
3162            .iter()
3163            .find(|f| {
3164                f.file_name()
3165                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3166            })
3167            .expect("to be found");
3168        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3169
3170        assert_eq!(
3171            &uid_dockerfile,
3172            r#"ARG BASE_IMAGE
3173FROM $BASE_IMAGE
3174
3175USER root
3176
3177ARG REMOTE_USER
3178ARG NEW_UID
3179ARG NEW_GID
3180SHELL ["/bin/sh", "-c"]
3181RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3182	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3183	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3184	if [ -z "$OLD_UID" ]; then \
3185		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3186	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3187		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3188	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3189		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3190	else \
3191		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3192			FREE_GID=65532; \
3193			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3194			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3195			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3196		fi; \
3197		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3198		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3199		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3200			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3201		fi; \
3202		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3203	fi;
3204
3205ARG IMAGE_USER
3206USER $IMAGE_USER
3207
3208# Ensure that /etc/profile does not clobber the existing path
3209RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3210
3211ENV DOCKER_BUILDKIT=1
3212
3213ENV GOPATH=/go
3214ENV GOROOT=/usr/local/go
3215ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3216ENV VARIABLE_VALUE=value
3217"#
3218        );
3219
3220        let golang_install_wrapper = files
3221            .iter()
3222            .find(|f| {
3223                f.file_name()
3224                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3225                    && f.to_str().is_some_and(|s| s.contains("/go_"))
3226            })
3227            .expect("to be found");
3228        let golang_install_wrapper = test_dependencies
3229            .fs
3230            .load(golang_install_wrapper)
3231            .await
3232            .unwrap();
3233        assert_eq!(
3234            &golang_install_wrapper,
3235            r#"#!/bin/sh
3236set -e
3237
3238on_exit () {
3239    [ $? -eq 0 ] && exit
3240    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3241}
3242
3243trap on_exit EXIT
3244
3245echo ===========================================================================
3246echo 'Feature       : go'
3247echo 'Id            : ghcr.io/devcontainers/features/go:1'
3248echo 'Options       :'
3249echo '    GOLANGCILINTVERSION=latest
3250    VERSION=latest'
3251echo ===========================================================================
3252
3253set -a
3254. ../devcontainer-features.builtin.env
3255. ./devcontainer-features.env
3256set +a
3257
3258chmod +x ./install.sh
3259./install.sh
3260"#
3261        );
3262
3263        let docker_commands = test_dependencies
3264            .command_runner
3265            .commands_by_program("docker");
3266
3267        let docker_run_command = docker_commands
3268            .iter()
3269            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3270            .expect("found");
3271
3272        assert_eq!(
3273            docker_run_command.args,
3274            vec![
3275                "run".to_string(),
3276                "--privileged".to_string(),
3277                "--sig-proxy=false".to_string(),
3278                "-d".to_string(),
3279                "--mount".to_string(),
3280                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3281                "--mount".to_string(),
3282                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3283                "--mount".to_string(),
3284                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3285                "-l".to_string(),
3286                "devcontainer.local_folder=/path/to/local/project".to_string(),
3287                "-l".to_string(),
3288                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3289                "-l".to_string(),
3290                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3291                "-p".to_string(),
3292                "8082:8082".to_string(),
3293                "-p".to_string(),
3294                "8083:8083".to_string(),
3295                "-p".to_string(),
3296                "8084:8084".to_string(),
3297                "--entrypoint".to_string(),
3298                "/bin/sh".to_string(),
3299                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3300                "-c".to_string(),
3301                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3302                "-".to_string()
3303            ]
3304        );
3305
3306        let docker_exec_commands = test_dependencies
3307            .docker
3308            .exec_commands_recorded
3309            .lock()
3310            .unwrap();
3311
3312        assert!(docker_exec_commands.iter().all(|exec| {
3313            exec.env
3314                == HashMap::from([
3315                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3316                    (
3317                        "PATH".to_string(),
3318                        "/initial/path:/some/other/path".to_string(),
3319                    ),
3320                ])
3321        }))
3322    }
3323
    // updateRemoteUserUID is treated as false on Windows, so this test would fail
    // there. The Windows-equivalent behavior is covered by
    // test_spawns_devcontainer_with_docker_compose_and_no_update_uid.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json (JSONC: comments and trailing commas allowed) using the
        // docker-compose path: "dockerComposeFile" + "service" instead of an image
        // or Dockerfile. Ports come from both "forwardPorts" and "appPort".
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],
              "appPort": "8084",

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the docker-compose.yml referenced by "dockerComposeFile" above.
        // The "app" service builds from the local Dockerfile and shares the "db"
        // service's network via `network_mode: service:db`.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Base-image Dockerfile used by the "app" service's `build:` section.
        // Its body (including the 4-space continuation indentation) must stay in
        // sync with the Dockerfile.extended assertion below, which embeds it.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variable substitutions (e.g.
        // ${localWorkspaceFolderBasename}) before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The build should have generated a Dockerfile.extended that wraps the
        // user's Dockerfile with the feature-install stages (aws-cli_0 and
        // docker-in-docker_1, in declaration order).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateRemoteUserUID is not disabled in this config, so a separate
        // updateUID.Dockerfile should also have been generated to remap the
        // remote user's UID/GID to the host's.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The compose runtime override file should pin the entrypoint, labels,
        // and docker-in-docker requirements (privileged + dind volume) on "app",
        // while the forwarded ports (forwardPorts + appPort) land on "db" — the
        // service whose network "app" joins via `network_mode: service:db`.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        // Named volume required by the docker-in-docker feature;
                        // suffix is a hash-derived stable identifier.
                        volumes: vec![
                            MountDefinition {
                                source: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "8084".to_string(),
                                published: "8084".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3629
3630    #[gpui::test]
3631    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3632        cx: &mut TestAppContext,
3633    ) {
3634        cx.executor().allow_parking();
3635        env_logger::try_init().ok();
3636        let given_devcontainer_contents = r#"
3637        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3638        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3639        {
3640          "features": {
3641            "ghcr.io/devcontainers/features/aws-cli:1": {},
3642            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3643          },
3644          "name": "Rust and PostgreSQL",
3645          "dockerComposeFile": "docker-compose.yml",
3646          "service": "app",
3647          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3648
3649          // Features to add to the dev container. More info: https://containers.dev/features.
3650          // "features": {},
3651
3652          // Use 'forwardPorts' to make a list of ports inside the container available locally.
3653          "forwardPorts": [
3654            8083,
3655            "db:5432",
3656            "db:1234",
3657          ],
3658          "updateRemoteUserUID": false,
3659          "appPort": "8084",
3660
3661          // Use 'postCreateCommand' to run commands after the container is created.
3662          // "postCreateCommand": "rustc --version",
3663
3664          // Configure tool-specific properties.
3665          // "customizations": {},
3666
3667          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3668          // "remoteUser": "root"
3669        }
3670        "#;
3671        let (test_dependencies, mut devcontainer_manifest) =
3672            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3673                .await
3674                .unwrap();
3675
3676        test_dependencies
3677        .fs
3678        .atomic_write(
3679            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3680            r#"
3681version: '3.8'
3682
3683volumes:
3684postgres-data:
3685
3686services:
3687app:
3688    build:
3689        context: .
3690        dockerfile: Dockerfile
3691    env_file:
3692        # Ensure that the variables in .env match the same variables in devcontainer.json
3693        - .env
3694
3695    volumes:
3696        - ../..:/workspaces:cached
3697
3698    # Overrides default command so things don't shut down after the process ends.
3699    command: sleep infinity
3700
3701    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3702    network_mode: service:db
3703
3704    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3705    # (Adding the "ports" property to this file will not forward from a Codespace.)
3706
3707db:
3708    image: postgres:14.1
3709    restart: unless-stopped
3710    volumes:
3711        - postgres-data:/var/lib/postgresql/data
3712    env_file:
3713        # Ensure that the variables in .env match the same variables in devcontainer.json
3714        - .env
3715
3716    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3717    # (Adding the "ports" property to this file will not forward from a Codespace.)
3718                "#.trim().to_string(),
3719        )
3720        .await
3721        .unwrap();
3722
3723        test_dependencies.fs.atomic_write(
3724        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3725        r#"
3726FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3727
3728# Include lld linker to improve build times either by using environment variable
3729# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3730RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3731&& apt-get -y install clang lld \
3732&& apt-get autoremove -y && apt-get clean -y
3733        "#.trim().to_string()).await.unwrap();
3734
3735        devcontainer_manifest.parse_nonremote_vars().unwrap();
3736
3737        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3738
3739        let files = test_dependencies.fs.files();
3740        let feature_dockerfile = files
3741            .iter()
3742            .find(|f| {
3743                f.file_name()
3744                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3745            })
3746            .expect("to be found");
3747        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3748        assert_eq!(
3749            &feature_dockerfile,
3750            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3751
3752FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3753
3754# Include lld linker to improve build times either by using environment variable
3755# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3756RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3757&& apt-get -y install clang lld \
3758&& apt-get autoremove -y && apt-get clean -y
3759
3760FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3761USER root
3762COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3763RUN chmod -R 0755 /tmp/build-features/
3764
3765FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3766
3767USER root
3768
3769RUN mkdir -p /tmp/dev-container-features
3770COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3771
3772RUN \
3773echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3774echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3775
3776
3777RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3778cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3779&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3780&& cd /tmp/dev-container-features/aws-cli_0 \
3781&& chmod +x ./devcontainer-features-install.sh \
3782&& ./devcontainer-features-install.sh \
3783&& rm -rf /tmp/dev-container-features/aws-cli_0
3784
3785RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3786cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3787&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3788&& cd /tmp/dev-container-features/docker-in-docker_1 \
3789&& chmod +x ./devcontainer-features-install.sh \
3790&& ./devcontainer-features-install.sh \
3791&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3792
3793
3794ARG _DEV_CONTAINERS_IMAGE_USER=root
3795USER $_DEV_CONTAINERS_IMAGE_USER
3796
3797# Ensure that /etc/profile does not clobber the existing path
3798RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3799
3800
3801ENV DOCKER_BUILDKIT=1
3802"#
3803        );
3804    }
3805
    /// End-to-end test: a devcontainer using `dockerComposeFile` plus two OCI
    /// features, built through a `FakeDocker` switched into Podman mode.
    /// Asserts the exact bytes of the generated `Dockerfile.extended` (which,
    /// for Podman, installs features via `RUN --mount=type=bind` stages) and of
    /// the generated `updateUID.Dockerfile`.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture (JSONC: comments and trailing commas allowed).
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          // "forwardPorts": [5432],

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        // Podman mode is what this test is about: it changes which flavour of
        // feature-install Dockerfile gets generated (asserted below).
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // docker-compose.yml fixture referenced by `dockerComposeFile` above.
        // NOTE(review): the keys below are flush-left rather than nested under
        // `services:`/`app:` — presumably the compose handling exercised here
        // tolerates this shape; confirm the fixture is intentionally unindented.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Dockerfile used by the compose service's `build:` section.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve the non-remote ${...} substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // The manifest is expected to have written a Dockerfile.extended that
        // layers the feature-install stages on top of the user's Dockerfile.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Byte-exact comparison: note the Podman flavour uses
        // `RUN --mount=type=bind,...` per feature and removes each feature dir
        // after installing, unlike the plain-Docker COPY flavour.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The base image's metadata reports remoteUser "vscode" (see FakeDocker
        // inspect), so a UID-updating Dockerfile is also generated.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4031
    /// End-to-end test: a devcontainer built from a `Dockerfile` (with build
    /// args) where `"updateRemoteUserUID": false`. Verifies the exact generated
    /// `Dockerfile.extended` (plain-Docker/BuildKit flavour, plus feature and
    /// `containerEnv` ENV lines), the generated feature install wrapper script
    /// for the `go` feature, that extension ids come from the `zed`
    /// customization, that a `docker run` command was issued, and that every
    /// recorded exec carries the merged `remoteEnv` (with `${containerEnv:PATH}`
    /// expanded against the container's `PATH=/initial/path`).
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture (JSONC) exercising most single-container
        // options: build args, mounts, ports, lifecycle commands, features,
        // customizations, remoteUser/remoteEnv.
        let given_devcontainer_contents = r#"
            /*---------------------------------------------------------------------------------------------
             *  Copyright (c) Microsoft Corporation. All rights reserved.
             *  Licensed under the MIT License. See License.txt in the project root for license information.
             *--------------------------------------------------------------------------------------------*/
            {
              "name": "cli-${devcontainerId}",
              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                  "VARIANT": "18-bookworm",
                  "FOO": "bar",
                },
              },
              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
              "workspaceFolder": "/workspace2",
              "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
              ],

              "forwardPorts": [
                8082,
                8083,
              ],
              "appPort": "8084",
              "updateRemoteUserUID": false,

              "containerEnv": {
                "VARIABLE_VALUE": "value",
              },

              "initializeCommand": "touch IAM.md",

              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

              "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
              },

              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

              "remoteUser": "node",

              "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
              },

              "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                  "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
              },

              "customizations": {
                "vscode": {
                  "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                  ],
                },
                "zed": {
                  "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                  "repositories": {
                    "devcontainers/features": {
                      "permissions": {
                        "contents": "write",
                        "workflows": "write",
                      },
                    },
                  },
                },
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile fixture referenced by the `build.dockerfile` field above.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Resolve the non-remote ${...} substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the `zed` customization's extensions should surface here.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Byte-exact comparison of the generated extended Dockerfile. Note the
        // trailing ENV lines: feature-contributed GOPATH/GOROOT/PATH plus the
        // devcontainer's containerEnv VARIABLE_VALUE.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // The install wrapper script generated for the `go` feature; matched on
        // both file name and the `go_` directory component of its path.
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // The container must have been started via `docker run ...`.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // Every exec into the container carries the remoteEnv, with
        // ${containerEnv:PATH} expanded against the container's inspected
        // PATH=/initial/path.
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4311
    /// End-to-end test: the simplest devcontainer, a bare `"image"` with no
    /// build or features. Verifies that an `updateUID.Dockerfile` is still
    /// generated (byte-exact) so the remote user's UID/GID can be aligned with
    /// the local user.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "image": "test_image:latest",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Resolve the non-remote ${...} substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Byte-exact comparison; unlike the feature-based builds this variant
        // carries no trailing `ENV DOCKER_BUILDKIT=1` line.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4386
    /// End-to-end test: a devcontainer using `dockerComposeFile` whose service
    /// references a plain `image:` (no `build:` section, no features).
    /// Verifies that the generated `updateUID.Dockerfile` matches byte-exactly.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "dockerComposeFile": "docker-compose-plain.yml",
              "service": "app",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture referenced by `dockerComposeFile` above: a single
        // image-based service with no build step.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
    app:
        image: test_image:latest
        command: sleep infinity
        volumes:
            - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        // Resolve the non-remote ${...} substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Byte-exact comparison; same UID-update recipe as the plain-image
        // (non-compose) case.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4480
    /// One exec call captured by [`FakeDocker`] so tests can assert on how the
    /// code under test executed commands inside the container. Fields with a
    /// leading underscore are recorded but not currently asserted on.
    pub(crate) struct RecordedExecCommand {
        // Container the exec targeted.
        pub(crate) _container_id: String,
        // Remote working folder the exec was issued for.
        pub(crate) _remote_folder: String,
        // User the command was run as inside the container.
        pub(crate) _user: String,
        // Environment passed to the exec; asserted against the expected merged
        // remoteEnv in tests.
        pub(crate) env: HashMap<String, String>,
        // The underlying command that was wrapped by the exec.
        pub(crate) _inner_command: Command,
    }
4488
    /// Test double for the Docker client: records exec calls for later
    /// assertions and can pretend to be Podman (which changes which build-file
    /// flavour the code under test generates).
    pub(crate) struct FakeDocker {
        // Every exec issued through this fake, in call order; behind a Mutex
        // because the trait methods take `&self`.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as Podman.
        podman: bool,
    }
4493
4494    impl FakeDocker {
4495        pub(crate) fn new() -> Self {
4496            Self {
4497                podman: false,
4498                exec_commands_recorded: Mutex::new(Vec::new()),
4499            }
4500        }
4501        #[cfg(not(target_os = "windows"))]
4502        fn set_podman(&mut self, podman: bool) {
4503            self.podman = podman;
4504        }
4505    }
4506
4507    #[async_trait]
4508    impl DockerClient for FakeDocker {
4509        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
4510            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
4511                return Ok(DockerInspect {
4512                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4513                        .to_string(),
4514                    config: DockerInspectConfig {
4515                        labels: DockerConfigLabels {
4516                            metadata: Some(vec![HashMap::from([(
4517                                "remoteUser".to_string(),
4518                                Value::String("node".to_string()),
4519                            )])]),
4520                        },
4521                        env: Vec::new(),
4522                        image_user: Some("root".to_string()),
4523                    },
4524                    mounts: None,
4525                    state: None,
4526                });
4527            }
4528            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
4529                return Ok(DockerInspect {
4530                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
4531                        .to_string(),
4532                    config: DockerInspectConfig {
4533                        labels: DockerConfigLabels {
4534                            metadata: Some(vec![HashMap::from([(
4535                                "remoteUser".to_string(),
4536                                Value::String("vscode".to_string()),
4537                            )])]),
4538                        },
4539                        image_user: Some("root".to_string()),
4540                        env: Vec::new(),
4541                    },
4542                    mounts: None,
4543                    state: None,
4544                });
4545            }
4546            if id.starts_with("cli_") {
4547                return Ok(DockerInspect {
4548                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4549                        .to_string(),
4550                    config: DockerInspectConfig {
4551                        labels: DockerConfigLabels {
4552                            metadata: Some(vec![HashMap::from([(
4553                                "remoteUser".to_string(),
4554                                Value::String("node".to_string()),
4555                            )])]),
4556                        },
4557                        image_user: Some("root".to_string()),
4558                        env: vec!["PATH=/initial/path".to_string()],
4559                    },
4560                    mounts: None,
4561                    state: None,
4562                });
4563            }
4564            if id == "found_docker_ps" {
4565                return Ok(DockerInspect {
4566                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4567                        .to_string(),
4568                    config: DockerInspectConfig {
4569                        labels: DockerConfigLabels {
4570                            metadata: Some(vec![HashMap::from([(
4571                                "remoteUser".to_string(),
4572                                Value::String("node".to_string()),
4573                            )])]),
4574                        },
4575                        image_user: Some("root".to_string()),
4576                        env: vec!["PATH=/initial/path".to_string()],
4577                    },
4578                    mounts: Some(vec![DockerInspectMount {
4579                        source: "/path/to/local/project".to_string(),
4580                        destination: "/workspaces/project".to_string(),
4581                    }]),
4582                    state: None,
4583                });
4584            }
4585            if id.starts_with("rust_a-") {
4586                return Ok(DockerInspect {
4587                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
4588                        .to_string(),
4589                    config: DockerInspectConfig {
4590                        labels: DockerConfigLabels {
4591                            metadata: Some(vec![HashMap::from([(
4592                                "remoteUser".to_string(),
4593                                Value::String("vscode".to_string()),
4594                            )])]),
4595                        },
4596                        image_user: Some("root".to_string()),
4597                        env: Vec::new(),
4598                    },
4599                    mounts: None,
4600                    state: None,
4601                });
4602            }
4603            if id == "test_image:latest" {
4604                return Ok(DockerInspect {
4605                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4606                        .to_string(),
4607                    config: DockerInspectConfig {
4608                        labels: DockerConfigLabels {
4609                            metadata: Some(vec![HashMap::from([(
4610                                "remoteUser".to_string(),
4611                                Value::String("node".to_string()),
4612                            )])]),
4613                        },
4614                        env: Vec::new(),
4615                        image_user: Some("root".to_string()),
4616                    },
4617                    mounts: None,
4618                    state: None,
4619                });
4620            }
4621
4622            Err(DevContainerError::DockerNotAvailable)
4623        }
4624        async fn get_docker_compose_config(
4625            &self,
4626            config_files: &Vec<PathBuf>,
4627        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
4628            if config_files.len() == 1
4629                && config_files.get(0)
4630                    == Some(&PathBuf::from(
4631                        "/path/to/local/project/.devcontainer/docker-compose.yml",
4632                    ))
4633            {
4634                return Ok(Some(DockerComposeConfig {
4635                    name: None,
4636                    services: HashMap::from([
4637                        (
4638                            "app".to_string(),
4639                            DockerComposeService {
4640                                build: Some(DockerComposeServiceBuild {
4641                                    context: Some(".".to_string()),
4642                                    dockerfile: Some("Dockerfile".to_string()),
4643                                    args: None,
4644                                    additional_contexts: None,
4645                                }),
4646                                volumes: vec![MountDefinition {
4647                                    source: "../..".to_string(),
4648                                    target: "/workspaces".to_string(),
4649                                    mount_type: Some("bind".to_string()),
4650                                }],
4651                                network_mode: Some("service:db".to_string()),
4652                                ..Default::default()
4653                            },
4654                        ),
4655                        (
4656                            "db".to_string(),
4657                            DockerComposeService {
4658                                image: Some("postgres:14.1".to_string()),
4659                                volumes: vec![MountDefinition {
4660                                    source: "postgres-data".to_string(),
4661                                    target: "/var/lib/postgresql/data".to_string(),
4662                                    mount_type: Some("volume".to_string()),
4663                                }],
4664                                env_file: Some(vec![".env".to_string()]),
4665                                ..Default::default()
4666                            },
4667                        ),
4668                    ]),
4669                    volumes: HashMap::from([(
4670                        "postgres-data".to_string(),
4671                        DockerComposeVolume::default(),
4672                    )]),
4673                }));
4674            }
4675            if config_files.len() == 1
4676                && config_files.get(0)
4677                    == Some(&PathBuf::from(
4678                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
4679                    ))
4680            {
4681                return Ok(Some(DockerComposeConfig {
4682                    name: None,
4683                    services: HashMap::from([(
4684                        "app".to_string(),
4685                        DockerComposeService {
4686                            image: Some("test_image:latest".to_string()),
4687                            command: vec!["sleep".to_string(), "infinity".to_string()],
4688                            ..Default::default()
4689                        },
4690                    )]),
4691                    ..Default::default()
4692                }));
4693            }
4694            Err(DevContainerError::DockerNotAvailable)
4695        }
        /// Test stub: pretends `docker compose build` always succeeds without
        /// doing any work or recording its inputs.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
4703        async fn run_docker_exec(
4704            &self,
4705            container_id: &str,
4706            remote_folder: &str,
4707            user: &str,
4708            env: &HashMap<String, String>,
4709            inner_command: Command,
4710        ) -> Result<(), DevContainerError> {
4711            let mut record = self
4712                .exec_commands_recorded
4713                .lock()
4714                .expect("should be available");
4715            record.push(RecordedExecCommand {
4716                _container_id: container_id.to_string(),
4717                _remote_folder: remote_folder.to_string(),
4718                _user: user.to_string(),
4719                env: env.clone(),
4720                _inner_command: inner_command,
4721            });
4722            Ok(())
4723        }
        /// Test stub: starting a container is not supported by this mock, so
        /// it always reports Docker as unavailable.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
4727        async fn find_process_by_filters(
4728            &self,
4729            _filters: Vec<String>,
4730        ) -> Result<Option<DockerPs>, DevContainerError> {
4731            Ok(Some(DockerPs {
4732                id: "found_docker_ps".to_string(),
4733            }))
4734        }
        /// BuildKit for compose is reported as supported unless the mock is
        /// configured to emulate podman.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
4738        fn docker_cli(&self) -> String {
4739            if self.podman {
4740                "podman".to_string()
4741            } else {
4742                "docker".to_string()
4743            }
4744        }
4745    }
4746
    /// Snapshot of a single command invocation captured by the test command
    /// runner: the program name plus its argument list, both stringified.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
4752
    /// `CommandRunner` test double that records every command it is asked to
    /// run instead of spawning any real process.
    pub(crate) struct TestCommandRunner {
        // Mutex gives interior mutability so recording works through `&self`.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4756
4757    impl TestCommandRunner {
4758        fn new() -> Self {
4759            Self {
4760                commands_recorded: Mutex::new(Vec::new()),
4761            }
4762        }
4763
4764        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4765            let record = self.commands_recorded.lock().expect("poisoned");
4766            record
4767                .iter()
4768                .filter(|r| r.program == program)
4769                .map(|r| r.clone())
4770                .collect()
4771        }
4772    }
4773
4774    #[async_trait]
4775    impl CommandRunner for TestCommandRunner {
4776        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4777            let mut record = self.commands_recorded.lock().expect("poisoned");
4778
4779            record.push(TestCommand {
4780                program: command.get_program().display().to_string(),
4781                args: command
4782                    .get_args()
4783                    .map(|a| a.display().to_string())
4784                    .collect(),
4785            });
4786
4787            Ok(Output {
4788                status: ExitStatus::default(),
4789                stdout: vec![],
4790                stderr: vec![],
4791            })
4792        }
4793    }
4794
4795    fn fake_http_client() -> Arc<dyn HttpClient> {
4796        FakeHttpClient::create(|request| async move {
4797            let (parts, _body) = request.into_parts();
4798            if parts.uri.path() == "/token" {
4799                let token_response = TokenResponse {
4800                    token: "token".to_string(),
4801                };
4802                return Ok(http::Response::builder()
4803                    .status(200)
4804                    .body(http_client::AsyncBody::from(
4805                        serde_json_lenient::to_string(&token_response).unwrap(),
4806                    ))
4807                    .unwrap());
4808            }
4809
4810            // OCI specific things
4811            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4812                let response = r#"
4813                    {
4814                        "schemaVersion": 2,
4815                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
4816                        "config": {
4817                            "mediaType": "application/vnd.devcontainers",
4818                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4819                            "size": 2
4820                        },
4821                        "layers": [
4822                            {
4823                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4824                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4825                                "size": 59392,
4826                                "annotations": {
4827                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4828                                }
4829                            }
4830                        ],
4831                        "annotations": {
4832                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4833                            "com.github.package.type": "devcontainer_feature"
4834                        }
4835                    }
4836                    "#;
4837                return Ok(http::Response::builder()
4838                    .status(200)
4839                    .body(http_client::AsyncBody::from(response))
4840                    .unwrap());
4841            }
4842
4843            if parts.uri.path()
4844                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4845            {
4846                let response = build_tarball(vec![
4847                    ("./NOTES.md", r#"
4848                        ## Limitations
4849
4850                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4851                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4852                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4853                          ```
4854                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4855                          ```
4856                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4857
4858
4859                        ## OS Support
4860
4861                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4862
4863                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4864
4865                        `bash` is required to execute the `install.sh` script."#),
4866                    ("./README.md", r#"
4867                        # Docker (Docker-in-Docker) (docker-in-docker)
4868
4869                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4870
4871                        ## Example Usage
4872
4873                        ```json
4874                        "features": {
4875                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4876                        }
4877                        ```
4878
4879                        ## Options
4880
4881                        | Options Id | Description | Type | Default Value |
4882                        |-----|-----|-----|-----|
4883                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4884                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4885                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4886                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4887                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4888                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4889                        | installDockerBuildx | Install Docker Buildx | boolean | true |
4890                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4891                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4892
4893                        ## Customizations
4894
4895                        ### VS Code Extensions
4896
4897                        - `ms-azuretools.vscode-containers`
4898
4899                        ## Limitations
4900
4901                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4902                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4903                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4904                          ```
4905                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4906                          ```
4907                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4908
4909
4910                        ## OS Support
4911
4912                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4913
4914                        `bash` is required to execute the `install.sh` script.
4915
4916
4917                        ---
4918
4919                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
4920                    ("./devcontainer-feature.json", r#"
4921                        {
4922                          "id": "docker-in-docker",
4923                          "version": "2.16.1",
4924                          "name": "Docker (Docker-in-Docker)",
4925                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4926                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4927                          "options": {
4928                            "version": {
4929                              "type": "string",
4930                              "proposals": [
4931                                "latest",
4932                                "none",
4933                                "20.10"
4934                              ],
4935                              "default": "latest",
4936                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4937                            },
4938                            "moby": {
4939                              "type": "boolean",
4940                              "default": true,
4941                              "description": "Install OSS Moby build instead of Docker CE"
4942                            },
4943                            "mobyBuildxVersion": {
4944                              "type": "string",
4945                              "default": "latest",
4946                              "description": "Install a specific version of moby-buildx when using Moby"
4947                            },
4948                            "dockerDashComposeVersion": {
4949                              "type": "string",
4950                              "enum": [
4951                                "none",
4952                                "v1",
4953                                "v2"
4954                              ],
4955                              "default": "v2",
4956                              "description": "Default version of Docker Compose (v1, v2 or none)"
4957                            },
4958                            "azureDnsAutoDetection": {
4959                              "type": "boolean",
4960                              "default": true,
4961                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4962                            },
4963                            "dockerDefaultAddressPool": {
4964                              "type": "string",
4965                              "default": "",
4966                              "proposals": [],
4967                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4968                            },
4969                            "installDockerBuildx": {
4970                              "type": "boolean",
4971                              "default": true,
4972                              "description": "Install Docker Buildx"
4973                            },
4974                            "installDockerComposeSwitch": {
4975                              "type": "boolean",
4976                              "default": false,
4977                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4978                            },
4979                            "disableIp6tables": {
4980                              "type": "boolean",
4981                              "default": false,
4982                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4983                            }
4984                          },
4985                          "entrypoint": "/usr/local/share/docker-init.sh",
4986                          "privileged": true,
4987                          "containerEnv": {
4988                            "DOCKER_BUILDKIT": "1"
4989                          },
4990                          "customizations": {
4991                            "vscode": {
4992                              "extensions": [
4993                                "ms-azuretools.vscode-containers"
4994                              ],
4995                              "settings": {
4996                                "github.copilot.chat.codeGeneration.instructions": [
4997                                  {
4998                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
4999                                  }
5000                                ]
5001                              }
5002                            }
5003                          },
5004                          "mounts": [
5005                            {
5006                              "source": "dind-var-lib-docker-${devcontainerId}",
5007                              "target": "/var/lib/docker",
5008                              "type": "volume"
5009                            }
5010                          ],
5011                          "installsAfter": [
5012                            "ghcr.io/devcontainers/features/common-utils"
5013                          ]
5014                        }"#),
5015                    ("./install.sh", r#"
5016                    #!/usr/bin/env bash
5017                    #-------------------------------------------------------------------------------------------------------------
5018                    # Copyright (c) Microsoft Corporation. All rights reserved.
5019                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5020                    #-------------------------------------------------------------------------------------------------------------
5021                    #
5022                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5023                    # Maintainer: The Dev Container spec maintainers
5024
5025
5026                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5027                    USE_MOBY="${MOBY:-"true"}"
5028                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5029                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5030                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5031                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5032                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5033                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5034                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5035                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5036                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5037                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5038                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5039                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5040
5041                    # Default: Exit on any failure.
5042                    set -e
5043
5044                    # Clean up
5045                    rm -rf /var/lib/apt/lists/*
5046
5047                    # Setup STDERR.
5048                    err() {
5049                        echo "(!) $*" >&2
5050                    }
5051
5052                    if [ "$(id -u)" -ne 0 ]; then
5053                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5054                        exit 1
5055                    fi
5056
5057                    ###################
5058                    # Helper Functions
5059                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5060                    ###################
5061
5062                    # Determine the appropriate non-root user
5063                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5064                        USERNAME=""
5065                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5066                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5067                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5068                                USERNAME=${CURRENT_USER}
5069                                break
5070                            fi
5071                        done
5072                        if [ "${USERNAME}" = "" ]; then
5073                            USERNAME=root
5074                        fi
5075                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5076                        USERNAME=root
5077                    fi
5078
                    # Package manager update function
                    # Refreshes the distro package metadata cache, but only when the cache
                    # directory looks empty, so repeated calls stay cheap.
                    pkg_mgr_update() {
                        case ${ADJUSTED_ID} in
                            debian)
                                # Only run apt-get update when /var/lib/apt/lists has no cached index.
                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                                    echo "Running apt-get update..."
                                    apt-get update -y
                                fi
                                ;;
                            rhel)
                                # microdnf shares yum's cache directory; other managers use their own.
                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                                    cache_check_dir="/var/cache/yum"
                                else
                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
                                fi
                                # Same idea: only rebuild the cache when it is empty.
                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
                                    echo "Running ${PKG_MGR_CMD} makecache ..."
                                    ${PKG_MGR_CMD} makecache
                                fi
                                ;;
                        esac
                    }
5101
                    # Checks if packages are installed and installs them if not.
                    # Args: one or more package names; they are queried and installed as one batch.
                    check_packages() {
                        case ${ADJUSTED_ID} in
                            debian)
                                # dpkg -s fails if any listed package is missing, triggering the install.
                                if ! dpkg -s "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    apt-get -y install --no-install-recommends "$@"
                                fi
                                ;;
                            rhel)
                                # rpm -q fails if any listed package is missing, triggering the install.
                                if ! rpm -q "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${PKG_MGR_CMD} -y install "$@"
                                fi
                                ;;
                        esac
                    }
5119
                    # Figure out the correct version when a full three-part version number is not passed.
                    # $1 = name of the variable holding the requested version (updated in place via declare -g)
                    # $2 = git repository URL to list tags from
                    # $3 = tag prefix (default "tags/v"), $4 = part separator (default "."),
                    # $5 = whether the last version part is optional (default "false")
                    # Exits 1 if the resolved value is not an exact entry in the tag list.
                    find_version_from_git_tags() {
                        local variable_name=$1
                        local requested_version=${!variable_name}
                        if [ "${requested_version}" = "none" ]; then return; fi
                        local repository=$2
                        local prefix=${3:-"tags/v"}
                        local separator=${4:-"."}
                        local last_part_optional=${5:-"false"}
                        # NOTE(review): grep -o "." uses an unescaped dot, which matches every
                        # character, so this counts the string length rather than the number of
                        # dots - the tag lookup below runs for any value whose length != 2.
                        # Confirm whether "\\." (count separators) was intended.
                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                            local escaped_separator=${separator//./\\.}
                            local last_part
                            if [ "${last_part_optional}" = "true" ]; then
                                last_part="(${escaped_separator}[0-9]+)?"
                            else
                                last_part="${escaped_separator}[0-9]+"
                            fi
                            # PCRE \K discards the matched prefix so only the version text remains.
                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                            else
                                set +e
                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                                set -e
                            fi
                        fi
                        # NOTE(review): version_list is declared local inside the branch above; if
                        # that branch is skipped, the validation below greps an empty list.
                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                            exit 1
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }
5153
                    # Use semver logic to decrement a version number then look for the closest match.
                    # $1 = name of the variable holding the current version (updated in place via
                    #      declare -g); $2 = git repository URL; $3-$5 forwarded to
                    #      find_version_from_git_tags when a tag lookup is needed.
                    find_prev_version_from_git_tags() {
                        local variable_name=$1
                        local current_version=${!variable_name}
                        local repository=$2
                        # Normally a "v" is used before the version number, but support alternate cases
                        local prefix=${3:-"tags/v"}
                        # Some repositories use "_" instead of "." for version number part separation, support that
                        local separator=${4:-"."}
                        # Some tools release versions that omit the last digit (e.g. go)
                        local last_part_optional=${5:-"false"}
                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
                        # NOTE(review): $6 is accepted here but never referenced below - confirm intent.
                        local version_suffix_regex=$6
                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                        set +e
                            # Split the version into major / minor / breakfix parts (empty when absent).
                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"

                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
                                ((major=major-1))
                                declare -g ${variable_name}="${major}"
                                # Look for latest version from previous major release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                declare -g ${variable_name}="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
                            else
                                ((breakfix=breakfix-1))
                                # Drop a trailing ".0" when the last part is optional (e.g. go-style tags).
                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
                                    declare -g ${variable_name}="${major}.${minor}"
                                else
                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
                                fi
                            fi
                        set -e
                    }
5194
                    # Function to fetch the version released prior to the latest version.
                    # $1 = git repo URL, $2 = GitHub API releases URL, $3 = name of the variable
                    # holding the version (updated in place via declare -g).
                    get_previous_version() {
                        local url=$1
                        local repo_url=$2
                        local variable_name=$3
                        prev_version=${!variable_name}

                        output=$(curl -s "$repo_url");
                        # A JSON object response is an API error payload rather than a release list.
                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
                          message=$(echo "$output" | jq -r '.message')

                          # On rate limiting, fall back to resolving via git tags instead of the API.
                          if [[ $message == "API rate limit exceeded"* ]]; then
                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
                                echo -e "\nAttempting to find latest version using GitHub tags."
                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
                                declare -g ${variable_name}="${prev_version}"
                           fi
                        # A JSON array response is the releases list; element [1] is the release
                        # immediately before the latest ([0]).
                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
                            echo -e "\nAttempting to find latest version using GitHub Api."
                            version=$(echo "$output" | jq -r '.[1].tag_name')
                            declare -g ${variable_name}="${version#v}"
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }
5219
5220                    get_github_api_repo_url() {
5221                        local url=$1
5222                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5223                    }
5224
5225                    ###########################################
5226                    # Start docker-in-docker installation
5227                    ###########################################
5228
5229                    # Ensure apt is in non-interactive to avoid prompts
5230                    export DEBIAN_FRONTEND=noninteractive
5231
5232                    # Source /etc/os-release to get OS info
5233                    . /etc/os-release
5234
5235                    # Determine adjusted ID and package manager
5236                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5237                        ADJUSTED_ID="debian"
5238                        PKG_MGR_CMD="apt-get"
5239                        # Use dpkg for Debian-based systems
5240                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5241                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5242                        ADJUSTED_ID="rhel"
5243                        # Determine the appropriate package manager for RHEL-based systems
5244                        for pkg_mgr in tdnf dnf microdnf yum; do
5245                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
5246                                PKG_MGR_CMD="$pkg_mgr"
5247                                break
5248                            fi
5249                        done
5250
5251                        if [ -z "${PKG_MGR_CMD}" ]; then
5252                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5253                            exit 1
5254                        fi
5255
5256                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5257                    else
5258                        err "Linux distro ${ID} not supported."
5259                        exit 1
5260                    fi
5261
5262                    # Azure Linux specific setup
5263                    if [ "${ID}" = "azurelinux" ]; then
5264                        VERSION_CODENAME="azurelinux${VERSION_ID}"
5265                    fi
5266
                    # Prevent attempting to install Moby on Debian trixie (packages removed)
                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
                        exit 1
                    fi

                    # Check if distro is supported.
                    # Debian-family installs are gated on a codename allow-list
                    # (DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES / DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES);
                    # RHEL-family installs only emit informational messages here.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
                            else
                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
                            fi
                        fi
                    else
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then

                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
                        fi
                    fi
5303
                    # Install base dependencies needed by the rest of this script
                    # (curl/wget for downloads, gnupg2 for key imports, jq for API parsing, etc.).
                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
                    case ${ADJUSTED_ID} in
                        debian)
                            check_packages apt-transport-https $base_packages dirmngr
                            ;;
                        rhel)
                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel

                            ;;
                    esac

                    # Install git if not already present
                    if ! command -v git >/dev/null 2>&1; then
                        check_packages git
                    fi

                    # Update CA certificates to ensure HTTPS connections work properly
                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
                        update-ca-certificates
                    fi

                    # Swap to legacy iptables for compatibility (Debian only)
                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
                        update-alternatives --set iptables /usr/sbin/iptables-legacy
                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
                    fi
5333
                    # Set up the necessary repositories:
                    # USE_MOBY=true  -> Microsoft-hosted Moby packages
                    # USE_MOBY=false -> Docker CE packages from download.docker.com
                    if [ "${USE_MOBY}" = "true" ]; then
                        # Name of open source engine/cli
                        engine_package_name="moby-engine"
                        cli_package_name="moby-cli"

                        case ${ADJUSTED_ID} in
                            debian)
                                # Import key safely and import Microsoft apt repo
                                {
                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
                                ;;
                            rhel)
                                echo "(*) ${ID} detected - checking for Moby packages..."

                                # Check if moby packages are available in default repos
                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                    echo "(*) Using built-in ${ID} Moby packages"
                                else
                                    case "${ID}" in
                                        azurelinux)
                                            # No Moby packages exist for Azure Linux - hard error.
                                            echo "(*) Moby packages not found in Azure Linux repositories"
                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                            exit 1
                                            ;;
                                        mariner)
                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
                                            # Add Microsoft repository if packages aren't available locally
                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
                    [microsoft]
                    name=Microsoft Repository
                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
                    EOF
                                    # Verify packages are available after adding repo
                                    pkg_mgr_update
                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                        echo "(*) Moby packages not found in Microsoft repository either"
                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                        exit 1
                                    fi
                                    ;;
                                *)
                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                                    exit 1
                                    ;;
                                esac
                            fi
                            ;;
                        esac
                    else
                        # Name of licensed engine/cli
                        engine_package_name="docker-ce"
                        cli_package_name="docker-ce-cli"
                        case ${ADJUSTED_ID} in
                            debian)
                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
                                ;;
                            rhel)
                                # Docker CE repository setup for RHEL-based systems
                                # Helper: write the Docker CE yum repo file directly (manual fallback).
                                setup_docker_ce_repo() {
                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
                    [docker-ce-stable]
                    name=Docker CE Stable
                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
                    skip_if_unavailable=1
                    module_hotfixes=1
                    EOF
                                }
                                # Helper: best-effort install of Docker CE runtime dependencies.
                                install_azure_linux_deps() {
                                    echo "(*) Installing device-mapper libraries for Docker CE..."
                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                                    echo "(*) Installing additional Docker CE dependencies..."
                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                                        echo "(*) Some optional dependencies could not be installed, continuing..."
                                    }
                                }
                                # Helper: add a file-context rule for /var/lib/docker when SELinux is active.
                                setup_selinux_context() {
                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                                    fi
                                }

                                # Special handling for RHEL Docker CE installation
                                case "${ID}" in
                                    azurelinux|mariner)
                                        echo "(*) ${ID} detected"
                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                                        echo "(*) Setting up Docker CE repository..."

                                        setup_docker_ce_repo
                                        install_azure_linux_deps

                                        if [ "${USE_MOBY}" != "true" ]; then
                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                                            setup_selinux_context
                                        else
                                            echo "(*) Using Moby - container-selinux not required"
                                        fi
                                        ;;
                                    *)
                                        # Standard RHEL/CentOS/Fedora approach
                                        if command -v dnf >/dev/null 2>&1; then
                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        elif command -v yum-config-manager >/dev/null 2>&1; then
                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        else
                                            # Manual fallback
                                            setup_docker_ce_repo
                                fi
                                ;;
                            esac
                            ;;
                        esac
                    fi
5466
5467                    # Refresh package database
5468                    case ${ADJUSTED_ID} in
5469                        debian)
5470                            apt-get update
5471                            ;;
5472                        rhel)
5473                            pkg_mgr_update
5474                            ;;
5475                    esac
5476
                    # Soft version matching: resolve the user-requested DOCKER_VERSION to an exact
                    # package-manager version suffix for the engine and CLI packages.
                    # "latest"/"lts"/"stable" leave the suffixes empty (install whatever is newest).
                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
                        # Empty, meaning grab whatever "latest" is in apt repo
                        engine_version_suffix=""
                        cli_version_suffix=""
                    else
                        case ${ADJUSTED_ID} in
                            debian)
                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                        set +e # Don't exit if finding version fails - will handle gracefully
                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                        set -e
                        # A bare "=" means the grep above matched nothing - report and bail.
                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                            exit 1
                        fi
                        ;;
                    rhel)
                         # For RHEL-based systems, use dnf/yum to find versions
                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
                                set +e # Don't exit if finding version fails - will handle gracefully
                                    if [ "${USE_MOBY}" = "true" ]; then
                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    else
                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    fi
                                set -e
                                if [ -n "${available_versions}" ]; then
                                    engine_version_suffix="-${available_versions}"
                                    cli_version_suffix="-${available_versions}"
                                else
                                    # Unlike the debian path, a miss here falls back to latest instead of failing.
                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                                    engine_version_suffix=""
                                    cli_version_suffix=""
                                fi
                                ;;
                        esac
                    fi
5521
                    # Version matching for moby-buildx: resolve MOBY_BUILDX_VERSION to an exact
                    # package version suffix, mirroring the Docker version matching above.
                    # Only relevant when installing Moby packages.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
                            # Empty, meaning grab whatever "latest" is in apt repo
                            buildx_version_suffix=""
                        else
                            case ${ADJUSTED_ID} in
                                debian)
                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                            set +e
                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                            set -e
                            # A bare "=" means no version matched - report and bail.
                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                                exit 1
                            fi
                            ;;
                                rhel)
                                    # For RHEL-based systems, try to find buildx version or use latest
                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                                    set +e
                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                                    set -e
                                    if [ -n "${available_buildx}" ]; then
                                        buildx_version_suffix="-${available_buildx}"
                                    else
                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                                        buildx_version_suffix=""
                                    fi
                                    ;;
                            esac
                            echo "buildx_version_suffix ${buildx_version_suffix}"
                        fi
                    fi
5559
5560                    # Install Docker / Moby CLI if not already installed
5561                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5562                        echo "Docker / Moby CLI and Engine already installed."
5563                    else
5564                            case ${ADJUSTED_ID} in
5565                            debian)
5566                                if [ "${USE_MOBY}" = "true" ]; then
5567                                    # Install engine
5568                                    set +e # Handle error gracefully
5569                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5570                                        exit_code=$?
5571                                    set -e
5572
5573                                    if [ ${exit_code} -ne 0 ]; then
5574                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5575                                        exit 1
5576                                    fi
5577
5578                                    # Install compose
5579                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5580                                else
5581                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5582                                    # Install compose
5583                                    apt-mark hold docker-ce docker-ce-cli
5584                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5585                                fi
5586                                ;;
5587                            rhel)
5588                                if [ "${USE_MOBY}" = "true" ]; then
5589                                    set +e # Handle error gracefully
5590                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5591                                        exit_code=$?
5592                                    set -e
5593
5594                                    if [ ${exit_code} -ne 0 ]; then
5595                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5596                                        exit 1
5597                                    fi
5598
5599                                    # Install compose
5600                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5601                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5602                                    fi
5603                                else
5604                                                   # Special handling for Azure Linux Docker CE installation
5605                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5606                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5607
5608                                        # Use rpm with --force and --nodeps for Azure Linux
5609                                        set +e  # Don't exit on error for this section
5610                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5611                                        install_result=$?
5612                                        set -e
5613
5614                                        if [ $install_result -ne 0 ]; then
5615                                            echo "(*) Standard installation failed, trying manual installation..."
5616
5617                                            echo "(*) Standard installation failed, trying manual installation..."
5618
5619                                            # Create directory for downloading packages
5620                                            mkdir -p /tmp/docker-ce-install
5621
5622                                            # Download packages manually using curl since tdnf doesn't support download
5623                                            echo "(*) Downloading Docker CE packages manually..."
5624
5625                                            # Get the repository baseurl
5626                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5627
5628                                            # Download packages directly
5629                                            cd /tmp/docker-ce-install
5630
5631                                            # Get package names with versions
5632                                            if [ -n "${cli_version_suffix}" ]; then
5633                                                docker_ce_version="${cli_version_suffix#-}"
5634                                                docker_cli_version="${engine_version_suffix#-}"
5635                                            else
5636                                                # Get latest version from repository
5637                                                docker_ce_version="latest"
5638                                            fi
5639
5640                                            echo "(*) Attempting to download Docker CE packages from repository..."
5641
5642                                            # Try to download latest packages if specific version fails
5643                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5644                                                # Fallback: try to get latest available version
5645                                                echo "(*) Specific version not found, trying latest..."
5646                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5647                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5648                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5649
5650                                                if [ -n "${latest_docker}" ]; then
5651                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5652                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5653                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5654                                                else
5655                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
5656                                                    echo "(*) Please check repository configuration or use 'moby': true"
5657                                                    exit 1
5658                                                fi
5659                                            fi
5660                                            # Install systemd libraries required by Docker CE
5661                                            echo "(*) Installing systemd libraries required by Docker CE..."
5662                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5663                                                echo "(*) WARNING: Could not install systemd libraries"
5664                                                echo "(*) Docker may fail to start without these"
5665                                            }
5666
5667                                            # Install with rpm --force --nodeps
5668                                            echo "(*) Installing Docker CE packages with dependency override..."
5669                                            rpm -Uvh --force --nodeps *.rpm
5670
5671                                            # Cleanup
5672                                            cd /
5673                                            rm -rf /tmp/docker-ce-install
5674
5675                                            echo "(*) Docker CE installation completed with dependency bypass"
5676                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5677                                        fi
5678                                    else
5679                                        # Standard installation for other RHEL-based systems
5680                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5681                                    fi
5682                                    # Install compose
5683                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5684                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5685                                    fi
5686                                fi
5687                                ;;
5688                        esac
5689                    fi
5690
5691                    echo "Finished installing docker / moby!"
5692
5693                    docker_home="/usr/libexec/docker"
5694                    cli_plugins_dir="${docker_home}/cli-plugins"
5695
5696                    # fallback for docker-compose
5697                    fallback_compose(){
5698                        local url=$1
5699                        local repo_url=$(get_github_api_repo_url "$url")
5700                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5701                        get_previous_version "${url}" "${repo_url}" compose_version
5702                        echo -e "\nAttempting to install v${compose_version}"
5703                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5704                    }
5705
5706                    # If 'docker-compose' command is to be included
5707                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5708                        case "${architecture}" in
5709                        amd64|x86_64) target_compose_arch=x86_64 ;;
5710                        arm64|aarch64) target_compose_arch=aarch64 ;;
5711                        *)
5712                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5713                            exit 1
5714                        esac
5715
5716                        docker_compose_path="/usr/local/bin/docker-compose"
5717                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5718                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5719                            INSTALL_DOCKER_COMPOSE_SWITCH="false"
5720
5721                            if [ "${target_compose_arch}" = "x86_64" ]; then
5722                                echo "(*) Installing docker compose v1..."
5723                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5724                                chmod +x ${docker_compose_path}
5725
5726                                # Download the SHA256 checksum
5727                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5728                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5729                                sha256sum -c docker-compose.sha256sum --ignore-missing
5730                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5731                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5732                                exit 1
5733                            else
5734                                # Use pip to get a version that runs on this architecture
5735                                check_packages python3-minimal python3-pip libffi-dev python3-venv
5736                                echo "(*) Installing docker compose v1 via pip..."
5737                                export PYTHONUSERBASE=/usr/local
5738                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5739                            fi
5740                        else
5741                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5742                            docker_compose_url="https://github.com/docker/compose"
5743                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5744                            echo "(*) Installing docker-compose ${compose_version}..."
5745                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5746                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5747                                     fallback_compose "$docker_compose_url"
5748                            }
5749
5750                            chmod +x ${docker_compose_path}
5751
5752                            # Download the SHA256 checksum
5753                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5754                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
5755                            sha256sum -c docker-compose.sha256sum --ignore-missing
5756
5757                            mkdir -p ${cli_plugins_dir}
5758                            cp ${docker_compose_path} ${cli_plugins_dir}
5759                        fi
5760                    fi
5761
5762                    # fallback method for compose-switch
5763                    fallback_compose-switch() {
5764                        local url=$1
5765                        local repo_url=$(get_github_api_repo_url "$url")
5766                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5767                        get_previous_version "$url" "$repo_url" compose_switch_version
5768                        echo -e "\nAttempting to install v${compose_switch_version}"
5769                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5770                    }
5771                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5772                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5773                        if type docker-compose > /dev/null 2>&1; then
5774                            echo "(*) Installing compose-switch..."
5775                            current_compose_path="$(command -v docker-compose)"
5776                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5777                            compose_switch_version="latest"
5778                            compose_switch_url="https://github.com/docker/compose-switch"
5779                            # Try to get latest version, fallback to known stable version if GitHub API fails
5780                            set +e
5781                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
5782                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5783                                echo "(*) GitHub API rate limited or failed, using fallback method"
5784                                fallback_compose-switch "$compose_switch_url"
5785                            fi
5786                            set -e
5787
5788                            # Map architecture for compose-switch downloads
5789                            case "${architecture}" in
5790                                amd64|x86_64) target_switch_arch=amd64 ;;
5791                                arm64|aarch64) target_switch_arch=arm64 ;;
5792                                *) target_switch_arch=${architecture} ;;
5793                            esac
5794                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5795                            chmod +x /usr/local/bin/compose-switch
5796                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5797                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5798                            mv "${current_compose_path}" "${target_compose_path}"
5799                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5800                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5801                        else
5802                            err "Skipping installation of compose-switch as docker compose is unavailable..."
5803                        fi
5804                    fi
5805
5806                    # If init file already exists, exit
5807                    if [ -f "/usr/local/share/docker-init.sh" ]; then
5808                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
5809                        # Clean up
5810                        rm -rf /var/lib/apt/lists/*
5811                        exit 0
5812                    fi
5813                    echo "docker-init doesn't exist, adding..."
5814
5815                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5816                            groupadd -r docker
5817                    fi
5818
5819                    usermod -aG docker ${USERNAME}
5820
5821                    # fallback for docker/buildx
5822                    fallback_buildx() {
5823                        local url=$1
5824                        local repo_url=$(get_github_api_repo_url "$url")
5825                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5826                        get_previous_version "$url" "$repo_url" buildx_version
5827                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5828                        echo -e "\nAttempting to install v${buildx_version}"
5829                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5830                    }
5831
5832                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5833                        buildx_version="latest"
5834                        docker_buildx_url="https://github.com/docker/buildx"
5835                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5836                        echo "(*) Installing buildx ${buildx_version}..."
5837
5838                          # Map architecture for buildx downloads
5839                        case "${architecture}" in
5840                            amd64|x86_64) target_buildx_arch=amd64 ;;
5841                            arm64|aarch64) target_buildx_arch=arm64 ;;
5842                            *) target_buildx_arch=${architecture} ;;
5843                        esac
5844
5845                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5846
5847                        cd /tmp
5848                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5849
5850                        docker_home="/usr/libexec/docker"
5851                        cli_plugins_dir="${docker_home}/cli-plugins"
5852
5853                        mkdir -p ${cli_plugins_dir}
5854                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5855                        chmod +x ${cli_plugins_dir}/docker-buildx
5856
5857                        chown -R "${USERNAME}:docker" "${docker_home}"
5858                        chmod -R g+r+w "${docker_home}"
5859                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5860                    fi
5861
5862                    DOCKER_DEFAULT_IP6_TABLES=""
5863                    if [ "$DISABLE_IP6_TABLES" == true ]; then
5864                        requested_version=""
5865                        # checking whether the version requested either is in semver format or just a number denoting the major version
5866                        # and, extracting the major version number out of the two scenarios
5867                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5868                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5869                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5870                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5871                            requested_version=$DOCKER_VERSION
5872                        fi
5873                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5874                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5875                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5876                        fi
5877                    fi
5878
5879                    if [ ! -d /usr/local/share ]; then
5880                        mkdir -p /usr/local/share
5881                    fi
5882
5883                    tee /usr/local/share/docker-init.sh > /dev/null \
5884                    << EOF
5885                    #!/bin/sh
5886                    #-------------------------------------------------------------------------------------------------------------
5887                    # Copyright (c) Microsoft Corporation. All rights reserved.
5888                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5889                    #-------------------------------------------------------------------------------------------------------------
5890
5891                    set -e
5892
5893                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5894                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5895                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5896                    EOF
5897
5898                    tee -a /usr/local/share/docker-init.sh > /dev/null \
5899                    << 'EOF'
5900                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5901                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5902                        find /run /var/run -iname 'docker*.pid' -delete || :
5903                        find /run /var/run -iname 'container*.pid' -delete || :
5904
5905                        # -- Start: dind wrapper script --
5906                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5907
5908                        export container=docker
5909
5910                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5911                            mount -t securityfs none /sys/kernel/security || {
5912                                echo >&2 'Could not mount /sys/kernel/security.'
5913                                echo >&2 'AppArmor detection and --privileged mode might break.'
5914                            }
5915                        fi
5916
5917                        # Mount /tmp (conditionally)
5918                        if ! mountpoint -q /tmp; then
5919                            mount -t tmpfs none /tmp
5920                        fi
5921
5922                        set_cgroup_nesting()
5923                        {
5924                            # cgroup v2: enable nesting
5925                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5926                                # move the processes from the root group to the /init group,
5927                                # otherwise writing subtree_control fails with EBUSY.
5928                                # An error during moving non-existent process (i.e., "cat") is ignored.
5929                                mkdir -p /sys/fs/cgroup/init
5930                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5931                                # enable controllers
5932                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5933                                    > /sys/fs/cgroup/cgroup.subtree_control
5934                            fi
5935                        }
5936
5937                        # Set cgroup nesting, retrying if necessary
5938                        retry_cgroup_nesting=0
5939
5940                        until [ "${retry_cgroup_nesting}" -eq "5" ];
5941                        do
5942                            set +e
5943                                set_cgroup_nesting
5944
5945                                if [ $? -ne 0 ]; then
5946                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5947                                else
5948                                    break
5949                                fi
5950
5951                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5952                            set -e
5953                        done
5954
5955                        # -- End: dind wrapper script --
5956
5957                        # Handle DNS
5958                        set +e
5959                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5960                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5961                            then
5962                                echo "Setting dockerd Azure DNS."
5963                                CUSTOMDNS="--dns 168.63.129.16"
5964                            else
5965                                echo "Not setting dockerd DNS manually."
5966                                CUSTOMDNS=""
5967                            fi
5968                        set -e
5969
5970                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5971                        then
5972                            DEFAULT_ADDRESS_POOL=""
5973                        else
5974                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5975                        fi
5976
5977                        # Start docker/moby engine
5978                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5979                    INNEREOF
5980                    )"
5981
5982                    sudo_if() {
5983                        COMMAND="$*"
5984
5985                        if [ "$(id -u)" -ne 0 ]; then
5986                            sudo $COMMAND
5987                        else
5988                            $COMMAND
5989                        fi
5990                    }
5991
5992                    retry_docker_start_count=0
5993                    docker_ok="false"
5994
5995                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
5996                    do
5997                        # Start using sudo if not invoked as root
5998                        if [ "$(id -u)" -ne 0 ]; then
5999                            sudo /bin/sh -c "${dockerd_start}"
6000                        else
6001                            eval "${dockerd_start}"
6002                        fi
6003
6004                        retry_count=0
6005                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
6006                        do
6007                            sleep 1s
6008                            set +e
6009                                docker info > /dev/null 2>&1 && docker_ok="true"
6010                            set -e
6011
6012                            retry_count=`expr $retry_count + 1`
6013                        done
6014
6015                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6016                            echo "(*) Failed to start docker, retrying..."
6017                            set +e
6018                                sudo_if pkill dockerd
6019                                sudo_if pkill containerd
6020                            set -e
6021                        fi
6022
6023                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
6024                    done
6025
6026                    # Execute whatever commands were passed in (if any). This allows us
6027                    # to set this script to ENTRYPOINT while still executing the default CMD.
6028                    exec "$@"
6029                    EOF
6030
6031                    chmod +x /usr/local/share/docker-init.sh
6032                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
6033
6034                    # Clean up
6035                    rm -rf /var/lib/apt/lists/*
6036
6037                    echo 'docker-in-docker-debian script has completed!'"#),
6038                ]).await;
6039
6040                return Ok(http::Response::builder()
6041                    .status(200)
6042                    .body(AsyncBody::from(response))
6043                    .unwrap());
6044            }
6045            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6046                let response = r#"
6047                    {
6048                        "schemaVersion": 2,
6049                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6050                        "config": {
6051                            "mediaType": "application/vnd.devcontainers",
6052                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6053                            "size": 2
6054                        },
6055                        "layers": [
6056                            {
6057                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6058                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6059                                "size": 20992,
6060                                "annotations": {
6061                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6062                                }
6063                            }
6064                        ],
6065                        "annotations": {
6066                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6067                            "com.github.package.type": "devcontainer_feature"
6068                        }
6069                    }
6070                    "#;
6071
6072                return Ok(http::Response::builder()
6073                    .status(200)
6074                    .body(http_client::AsyncBody::from(response))
6075                    .unwrap());
6076            }
            // Mock OCI registry: blob endpoint for the "go" feature layer. The digest
            // in the path matches the layer digest advertised by the /manifests/1
            // fixture above. The response is built by `build_tarball` from two
            // (path, contents) entries — the feature metadata and its install script —
            // presumably producing a .tgz like the real published layer; NOTE(review):
            // the manifest's "size"/"digest" fields are not recomputed from these
            // bytes here — confirm the code under test does not re-verify them.
            if parts.uri.path()
                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
            {
                let response = build_tarball(vec![
                    // Feature metadata: same content as the (escaped) JSON embedded in
                    // the manifest's "dev.containers.metadata" annotation.
                    ("./devcontainer-feature.json", r#"
                        {
                            "id": "go",
                            "version": "1.3.3",
                            "name": "Go",
                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
                            "options": {
                                "version": {
                                    "type": "string",
                                    "proposals": [
                                        "latest",
                                        "none",
                                        "1.24",
                                        "1.23"
                                    ],
                                    "default": "latest",
                                    "description": "Select or enter a Go version to install"
                                },
                                "golangciLintVersion": {
                                    "type": "string",
                                    "default": "latest",
                                    "description": "Version of golangci-lint to install"
                                }
                            },
                            "init": true,
                            "customizations": {
                                "vscode": {
                                    "extensions": [
                                        "golang.Go"
                                    ],
                                    "settings": {
                                        "github.copilot.chat.codeGeneration.instructions": [
                                            {
                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                                            }
                                        ]
                                    }
                                }
                            },
                            "containerEnv": {
                                "GOROOT": "/usr/local/go",
                                "GOPATH": "/go",
                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
                            },
                            "capAdd": [
                                "SYS_PTRACE"
                            ],
                            "securityOpt": [
                                "seccomp=unconfined"
                            ],
                            "installsAfter": [
                                "ghcr.io/devcontainers/features/common-utils"
                            ]
                        }
                        "#),
                    // Upstream install script from devcontainers/features (go),
                    // included verbatim as fixture payload bytes.
                    ("./install.sh", r#"
                    #!/usr/bin/env bash
                    #-------------------------------------------------------------------------------------------------------------
                    # Copyright (c) Microsoft Corporation. All rights reserved.
                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
                    #-------------------------------------------------------------------------------------------------------------
                    #
                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
                    # Maintainer: The VS Code and Codespaces Teams

                    TARGET_GO_VERSION="${VERSION:-"latest"}"
                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

                    # https://www.google.com/linuxrepositories/
                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

                    set -e

                    if [ "$(id -u)" -ne 0 ]; then
                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
                        exit 1
                    fi

                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
                    . /etc/os-release
                    # Get an adjusted ID independent of distro variants
                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
                        ADJUSTED_ID="debian"
                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
                        ADJUSTED_ID="rhel"
                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
                        else
                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
                        fi
                    else
                        echo "Linux distro ${ID} not supported."
                        exit 1
                    fi

                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
                        # Update the repo files to reference vault.centos.org.
                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
                    fi

                    # Setup INSTALL_CMD & PKG_MGR_CMD
                    if type apt-get > /dev/null 2>&1; then
                        PKG_MGR_CMD=apt-get
                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
                    elif type microdnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=microdnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    elif type dnf > /dev/null 2>&1; then
                        PKG_MGR_CMD=dnf
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
                    else
                        PKG_MGR_CMD=yum
                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
                    fi

                    # Clean up
                    clean_up() {
                        case ${ADJUSTED_ID} in
                            debian)
                                rm -rf /var/lib/apt/lists/*
                                ;;
                            rhel)
                                rm -rf /var/cache/dnf/* /var/cache/yum/*
                                rm -rf /tmp/yum.log
                                rm -rf ${GPG_INSTALL_PATH}
                                ;;
                        esac
                    }
                    clean_up


                    # Figure out correct version of a three part version number is not passed
                    find_version_from_git_tags() {
                        local variable_name=$1
                        local requested_version=${!variable_name}
                        if [ "${requested_version}" = "none" ]; then return; fi
                        local repository=$2
                        local prefix=${3:-"tags/v"}
                        local separator=${4:-"."}
                        local last_part_optional=${5:-"false"}
                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                            local escaped_separator=${separator//./\\.}
                            local last_part
                            if [ "${last_part_optional}" = "true" ]; then
                                last_part="(${escaped_separator}[0-9]+)?"
                            else
                                last_part="${escaped_separator}[0-9]+"
                            fi
                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                            else
                                set +e
                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                                set -e
                            fi
                        fi
                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                            exit 1
                        fi
                        echo "${variable_name}=${!variable_name}"
                    }

                    pkg_mgr_update() {
                        case $ADJUSTED_ID in
                            debian)
                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                                    echo "Running apt-get update..."
                                    ${PKG_MGR_CMD} update -y
                                fi
                                ;;
                            rhel)
                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} makecache ..."
                                        ${PKG_MGR_CMD} makecache
                                    fi
                                else
                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                                        echo "Running ${PKG_MGR_CMD} check-update ..."
                                        set +e
                                        ${PKG_MGR_CMD} check-update
                                        rc=$?
                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
                                            exit 1
                                        fi
                                        set -e
                                    fi
                                fi
                                ;;
                        esac
                    }

                    # Checks if packages are installed and installs them if not
                    check_packages() {
                        case ${ADJUSTED_ID} in
                            debian)
                                if ! dpkg -s "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                            rhel)
                                if ! rpm -q "$@" > /dev/null 2>&1; then
                                    pkg_mgr_update
                                    ${INSTALL_CMD} "$@"
                                fi
                                ;;
                        esac
                    }

                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
                    rm -f /etc/profile.d/00-restore-env.sh
                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
                    chmod +x /etc/profile.d/00-restore-env.sh

                    # Some distributions do not install awk by default (e.g. Mariner)
                    if ! type awk >/dev/null 2>&1; then
                        check_packages awk
                    fi

                    # Determine the appropriate non-root user
                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
                        USERNAME=""
                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
                                USERNAME=${CURRENT_USER}
                                break
                            fi
                        done
                        if [ "${USERNAME}" = "" ]; then
                            USERNAME=root
                        fi
                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
                        USERNAME=root
                    fi

                    export DEBIAN_FRONTEND=noninteractive

                    check_packages ca-certificates gnupg2 tar gcc make pkg-config

                    if [ $ADJUSTED_ID = "debian" ]; then
                        check_packages g++ libc6-dev
                    else
                        check_packages gcc-c++ glibc-devel
                    fi
                    # Install curl, git, other dependencies if missing
                    if ! type curl > /dev/null 2>&1; then
                        check_packages curl
                    fi
                    if ! type git > /dev/null 2>&1; then
                        check_packages git
                    fi
                    # Some systems, e.g. Mariner, still a few more packages
                    if ! type as > /dev/null 2>&1; then
                        check_packages binutils
                    fi
                    if ! [ -f /usr/include/linux/errno.h ]; then
                        check_packages kernel-headers
                    fi
                    # Minimal RHEL install may need findutils installed
                    if ! [ -f /usr/bin/find ]; then
                        check_packages findutils
                    fi

                    # Get closest match for version number specified
                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

                    architecture="$(uname -m)"
                    case $architecture in
                        x86_64) architecture="amd64";;
                        aarch64 | armv8*) architecture="arm64";;
                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
                        i?86) architecture="386";;
                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
                    esac

                    # Install Go
                    umask 0002
                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
                        groupadd -r golang
                    fi
                    usermod -a -G golang "${USERNAME}"
                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
                        # Use a temporary location for gpg keys to avoid polluting image
                        export GNUPGHOME="/tmp/tmp-gnupg"
                        mkdir -p ${GNUPGHOME}
                        chmod 700 ${GNUPGHOME}
                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
                        gpg -q --import /tmp/tmp-gnupg/golang_key
                        echo "Downloading Go ${TARGET_GO_VERSION}..."
                        set +e
                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        exit_code=$?
                        set -e
                        if [ "$exit_code" != "0" ]; then
                            echo "(!) Download failed."
                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                            set +e
                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
                            # Handle Go's odd version pattern where "0" releases omit the last part
                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                                ((minor=minor-1))
                                TARGET_GO_VERSION="${major}.${minor}"
                                # Look for latest version from previous minor release
                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
                            else
                                ((breakfix=breakfix-1))
                                if [ "${breakfix}" = "0" ]; then
                                    TARGET_GO_VERSION="${major}.${minor}"
                                else
                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
                                fi
                            fi
                            set -e
                            echo "Trying ${TARGET_GO_VERSION}..."
                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                        fi
                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
                        echo "Extracting Go ${TARGET_GO_VERSION}..."
                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
                    else
                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
                    fi

                    # Install Go tools that are isImportant && !replacedByGopls based on
                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
                    GO_TOOLS="\
                        golang.org/x/tools/gopls@latest \
                        honnef.co/go/tools/cmd/staticcheck@latest \
                        golang.org/x/lint/golint@latest \
                        github.com/mgechev/revive@latest \
                        github.com/go-delve/delve/cmd/dlv@latest \
                        github.com/fatih/gomodifytags@latest \
                        github.com/haya14busa/goplay/cmd/goplay@latest \
                        github.com/cweill/gotests/gotests@latest \
                        github.com/josharian/impl@latest"

                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
                        echo "Installing common Go tools..."
                        export PATH=${TARGET_GOROOT}/bin:${PATH}
                        export GOPATH=/tmp/gotools
                        export GOCACHE="${GOPATH}/cache"

                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
                        cd "${GOPATH}"

                        # Use go get for versions of go under 1.16
                        go_install_command=install
                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
                            export GO111MODULE=on
                            go_install_command=get
                            echo "Go version < 1.16, using go get."
                        fi

                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

                        # Move Go tools into path
                        if [ -d "${GOPATH}/bin" ]; then
                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
                        fi

                        # Install golangci-lint from precompiled binaries
                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
                            echo "Installing golangci-lint latest..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin"
                        else
                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
                        fi

                        # Remove Go tools temp directory
                        rm -rf "${GOPATH}"
                    fi


                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

                    # Clean up
                    clean_up

                    echo "Done!"
                        "#),
                ])
                .await;
                // Serve the assembled tarball bytes as the blob body.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // Mock OCI registry: manifest endpoint for the "aws-cli" feature at tag
            // "1". Returns a static OCI image manifest whose single tar layer digest
            // (sha256:4e9b04...) is served by the matching /blobs/ handler, and whose
            // "dev.containers.metadata" annotation embeds the feature's
            // devcontainer-feature.json as an escaped JSON string.
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                                "size": 19968,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }"#;
                // Serve the manifest JSON verbatim with a 200 status.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
6525            if parts.uri.path()
6526                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6527            {
6528                let response = build_tarball(vec![
6529                    (
6530                        "./devcontainer-feature.json",
6531                        r#"
6532{
6533    "id": "aws-cli",
6534    "version": "1.1.3",
6535    "name": "AWS CLI",
6536    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6537    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6538    "options": {
6539        "version": {
6540            "type": "string",
6541            "proposals": [
6542                "latest"
6543            ],
6544            "default": "latest",
6545            "description": "Select or enter an AWS CLI version."
6546        },
6547        "verbose": {
6548            "type": "boolean",
6549            "default": true,
6550            "description": "Suppress verbose output."
6551        }
6552    },
6553    "customizations": {
6554        "vscode": {
6555            "extensions": [
6556                "AmazonWebServices.aws-toolkit-vscode"
6557            ],
6558            "settings": {
6559                "github.copilot.chat.codeGeneration.instructions": [
6560                    {
6561                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6562                    }
6563                ]
6564            }
6565        }
6566    },
6567    "installsAfter": [
6568        "ghcr.io/devcontainers/features/common-utils"
6569    ]
6570}
6571                    "#,
6572                    ),
6573                    (
6574                        "./install.sh",
6575                        r#"#!/usr/bin/env bash
6576                    #-------------------------------------------------------------------------------------------------------------
6577                    # Copyright (c) Microsoft Corporation. All rights reserved.
6578                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6579                    #-------------------------------------------------------------------------------------------------------------
6580                    #
6581                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6582                    # Maintainer: The VS Code and Codespaces Teams
6583
6584                    set -e
6585
6586                    # Clean up
6587                    rm -rf /var/lib/apt/lists/*
6588
6589                    VERSION=${VERSION:-"latest"}
6590                    VERBOSE=${VERBOSE:-"true"}
6591
6592                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6593                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6594
6595                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6596                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6597                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6598                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6599                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6600                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6601                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6602                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6603                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6604                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6605                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6606                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6607                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6608                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6609                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6610                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6611                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6612                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6613                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6614                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6615                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6616                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6617                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6618                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6619                    YLZATHZKTJyiqA==
6620                    =vYOk
6621                    -----END PGP PUBLIC KEY BLOCK-----"
6622
6623                    if [ "$(id -u)" -ne 0 ]; then
6624                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6625                        exit 1
6626                    fi
6627
6628                    apt_get_update()
6629                    {
6630                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6631                            echo "Running apt-get update..."
6632                            apt-get update -y
6633                        fi
6634                    }
6635
6636                    # Checks if packages are installed and installs them if not
6637                    check_packages() {
6638                        if ! dpkg -s "$@" > /dev/null 2>&1; then
6639                            apt_get_update
6640                            apt-get -y install --no-install-recommends "$@"
6641                        fi
6642                    }
6643
6644                    export DEBIAN_FRONTEND=noninteractive
6645
6646                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6647
6648                    verify_aws_cli_gpg_signature() {
6649                        local filePath=$1
6650                        local sigFilePath=$2
6651                        local awsGpgKeyring=aws-cli-public-key.gpg
6652
6653                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6654                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6655                        local status=$?
6656
6657                        rm "./${awsGpgKeyring}"
6658
6659                        return ${status}
6660                    }
6661
6662                    install() {
6663                        local scriptZipFile=awscli.zip
6664                        local scriptSigFile=awscli.sig
6665
6666                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6667                        if [ "${VERSION}" != "latest" ]; then
6668                            local versionStr=-${VERSION}
6669                        fi
6670                        architecture=$(dpkg --print-architecture)
6671                        case "${architecture}" in
6672                            amd64) architectureStr=x86_64 ;;
6673                            arm64) architectureStr=aarch64 ;;
6674                            *)
6675                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6676                                exit 1
6677                        esac
6678                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6679                        curl "${scriptUrl}" -o "${scriptZipFile}"
6680                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6681
6682                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6683                        if (( $? > 0 )); then
6684                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6685                            exit 1
6686                        fi
6687
6688                        if [ "${VERBOSE}" = "false" ]; then
6689                            unzip -q "${scriptZipFile}"
6690                        else
6691                            unzip "${scriptZipFile}"
6692                        fi
6693
6694                        ./aws/install
6695
6696                        # kubectl bash completion
6697                        mkdir -p /etc/bash_completion.d
6698                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6699
6700                        # kubectl zsh completion
6701                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6702                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6703                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6704                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6705                        fi
6706
6707                        rm -rf ./aws
6708                    }
6709
6710                    echo "(*) Installing AWS CLI..."
6711
6712                    install
6713
6714                    # Clean up
6715                    rm -rf /var/lib/apt/lists/*
6716
6717                    echo "Done!""#,
6718                    ),
6719                    ("./scripts/", r#""#),
6720                    (
6721                        "./scripts/fetch-latest-completer-scripts.sh",
6722                        r#"
6723                        #!/bin/bash
6724                        #-------------------------------------------------------------------------------------------------------------
6725                        # Copyright (c) Microsoft Corporation. All rights reserved.
6726                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6727                        #-------------------------------------------------------------------------------------------------------------
6728                        #
6729                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6730                        # Maintainer: The Dev Container spec maintainers
6731                        #
6732                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6733                        #
6734                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6735                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6736                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6737
6738                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6739                        chmod +x "$BASH_COMPLETER_SCRIPT"
6740
6741                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6742                        chmod +x "$ZSH_COMPLETER_SCRIPT"
6743                        "#,
6744                    ),
6745                    ("./scripts/vendor/", r#""#),
6746                    (
6747                        "./scripts/vendor/aws_bash_completer",
6748                        r#"
6749                        # Typically that would be added under one of the following paths:
6750                        # - /etc/bash_completion.d
6751                        # - /usr/local/etc/bash_completion.d
6752                        # - /usr/share/bash-completion/completions
6753
6754                        complete -C aws_completer aws
6755                        "#,
6756                    ),
6757                    (
6758                        "./scripts/vendor/aws_zsh_completer.sh",
6759                        r#"
6760                        # Source this file to activate auto completion for zsh using the bash
6761                        # compatibility helper.  Make sure to run `compinit` before, which should be
6762                        # given usually.
6763                        #
6764                        # % source /path/to/zsh_complete.sh
6765                        #
6766                        # Typically that would be called somewhere in your .zshrc.
6767                        #
6768                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6769                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6770                        #
6771                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6772                        #
6773                        # zsh releases prior to that version do not export the required env variables!
6774
6775                        autoload -Uz bashcompinit
6776                        bashcompinit -i
6777
6778                        _bash_complete() {
6779                          local ret=1
6780                          local -a suf matches
6781                          local -x COMP_POINT COMP_CWORD
6782                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6783                          local -x COMP_LINE="$words"
6784                          local -A savejobstates savejobtexts
6785
6786                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6787                          (( COMP_CWORD = CURRENT - 1))
6788                          COMP_WORDS=( $words )
6789                          BASH_VERSINFO=( 2 05b 0 1 release )
6790
6791                          savejobstates=( ${(kv)jobstates} )
6792                          savejobtexts=( ${(kv)jobtexts} )
6793
6794                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6795
6796                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6797
6798                          if [[ -n $matches ]]; then
6799                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6800                              compset -P '*/' && matches=( ${matches##*/} )
6801                              compset -S '/*' && matches=( ${matches%%/*} )
6802                              compadd -Q -f "${suf[@]}" -a matches && ret=0
6803                            else
6804                              compadd -Q "${suf[@]}" -a matches && ret=0
6805                            fi
6806                          fi
6807
6808                          if (( ret )); then
6809                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6810                              _default "${suf[@]}" && ret=0
6811                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6812                              _directories "${suf[@]}" && ret=0
6813                            fi
6814                          fi
6815
6816                          return ret
6817                        }
6818
6819                        complete -C aws_completer aws
6820                        "#,
6821                    ),
6822                ]).await;
6823
6824                return Ok(http::Response::builder()
6825                    .status(200)
6826                    .body(AsyncBody::from(response))
6827                    .unwrap());
6828            }
6829
6830            Ok(http::Response::builder()
6831                .status(404)
6832                .body(http_client::AsyncBody::default())
6833                .unwrap())
6834        })
6835    }
6836}