devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use regex::Regex;
  10
  11use fs::Fs;
  12use http_client::HttpClient;
  13use util::{ResultExt, command::Command};
  14
  15use crate::{
  16    DevContainerConfig, DevContainerContext,
  17    command_json::{CommandRunner, DefaultCommandRunner},
  18    devcontainer_api::{DevContainerError, DevContainerUp},
  19    devcontainer_json::{
  20        DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
  21        deserialize_devcontainer_json,
  22    },
  23    docker::{
  24        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  25        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  26        get_remote_dir_from_config,
  27    },
  28    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  29    get_oci_token,
  30    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  31    safe_id_lower,
  32};
  33
/// Tracks how far the devcontainer.json has been processed: freshly
/// deserialized, or re-deserialized after `${...}` variable substitution.
enum ConfigStatus {
    /// Parsed straight from the raw JSON; `${...}` variables are still literal.
    Deserialized(DevContainer),
    /// Re-parsed after non-remote variable expansion (see `parse_nonremote_vars`).
    VariableParsed(DevContainer),
}
  38
/// The docker-compose files referenced by the devcontainer config together
/// with the compose configuration resolved from them via the docker client.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Full paths to the compose files, in the order listed in the config.
    files: Vec<PathBuf>,
    // Merged compose configuration resolved from `files`.
    config: DockerComposeConfig,
}
  44
/// Orchestrates building and running a dev container from a parsed
/// devcontainer.json: variable expansion, feature downloads, extended
/// Dockerfile generation, and container startup.
struct DevContainerManifest {
    /// HTTP client used for OCI registry requests when fetching features.
    http_client: Arc<dyn HttpClient>,
    /// Filesystem abstraction for reading configs and writing build files.
    fs: Arc<dyn Fs>,
    /// Docker backend used for inspect/build/compose operations.
    docker_client: Arc<dyn DockerClient>,
    /// Runner for external commands (usage lives outside this chunk).
    command_runner: Arc<dyn CommandRunner>,
    /// Original devcontainer.json text, kept for variable re-parsing.
    raw_config: String,
    /// Current parse state of the config (raw vs. variable-expanded).
    config: ConfigStatus,
    /// Host environment, consulted for `${localEnv:...}` substitutions.
    local_environment: HashMap<String, String>,
    /// Root of the project checkout on the host.
    local_project_directory: PathBuf,
    /// Directory containing the devcontainer config file.
    config_directory: PathBuf,
    /// File name of the devcontainer config within `config_directory`.
    file_name: String,
    /// Inspect data for the resolved base image; populated during resource download.
    root_image: Option<DockerInspect>,
    /// Paths/tag for the features build context; populated during resource download.
    features_build_info: Option<FeaturesBuildInfo>,
    /// Feature manifests downloaded for this config, in install order.
    features: Vec<FeatureManifest>,
}
  60const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
  61impl DevContainerManifest {
  62    async fn new(
  63        context: &DevContainerContext,
  64        environment: HashMap<String, String>,
  65        docker_client: Arc<dyn DockerClient>,
  66        command_runner: Arc<dyn CommandRunner>,
  67        local_config: DevContainerConfig,
  68        local_project_path: &Path,
  69    ) -> Result<Self, DevContainerError> {
  70        let config_path = local_project_path.join(local_config.config_path.clone());
  71        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  72        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  73            log::error!("Unable to read devcontainer contents: {e}");
  74            DevContainerError::DevContainerParseFailed
  75        })?;
  76
  77        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  78
  79        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  80            log::error!("Dev container file should be in a directory");
  81            DevContainerError::NotInValidProject
  82        })?;
  83        let file_name = config_path
  84            .file_name()
  85            .and_then(|f| f.to_str())
  86            .ok_or_else(|| {
  87                log::error!("Dev container file has no file name, or is invalid unicode");
  88                DevContainerError::DevContainerParseFailed
  89            })?;
  90
  91        Ok(Self {
  92            fs: context.fs.clone(),
  93            http_client: context.http_client.clone(),
  94            docker_client,
  95            command_runner,
  96            raw_config: devcontainer_contents,
  97            config: ConfigStatus::Deserialized(devcontainer),
  98            local_project_directory: local_project_path.to_path_buf(),
  99            local_environment: environment,
 100            config_directory: devcontainer_directory.to_path_buf(),
 101            file_name: file_name.to_string(),
 102            root_image: None,
 103            features_build_info: None,
 104            features: Vec::new(),
 105        })
 106    }
 107
 108    fn devcontainer_id(&self) -> String {
 109        let mut labels = self.identifying_labels();
 110        labels.sort_by_key(|(key, _)| *key);
 111
 112        let mut hasher = DefaultHasher::new();
 113        for (key, value) in &labels {
 114            key.hash(&mut hasher);
 115            value.hash(&mut hasher);
 116        }
 117
 118        format!("{:016x}", hasher.finish())
 119    }
 120
 121    fn identifying_labels(&self) -> Vec<(&str, String)> {
 122        let labels = vec![
 123            (
 124                "devcontainer.local_folder",
 125                (self.local_project_directory.display()).to_string(),
 126            ),
 127            (
 128                "devcontainer.config_file",
 129                (self.config_file().display()).to_string(),
 130            ),
 131        ];
 132        labels
 133    }
 134
 135    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
 136        let mut replaced_content = content
 137            .replace("${devcontainerId}", &self.devcontainer_id())
 138            .replace(
 139                "${containerWorkspaceFolderBasename}",
 140                &self.remote_workspace_base_name().unwrap_or_default(),
 141            )
 142            .replace(
 143                "${localWorkspaceFolderBasename}",
 144                &self.local_workspace_base_name()?,
 145            )
 146            .replace(
 147                "${containerWorkspaceFolder}",
 148                &self
 149                    .remote_workspace_folder()
 150                    .map(|path| path.display().to_string())
 151                    .unwrap_or_default()
 152                    .replace('\\', "/"),
 153            )
 154            .replace(
 155                "${localWorkspaceFolder}",
 156                &self.local_workspace_folder().replace('\\', "/"),
 157            );
 158        for (k, v) in &self.local_environment {
 159            let find = format!("${{localEnv:{k}}}");
 160            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
 161        }
 162
 163        Ok(replaced_content)
 164    }
 165
 166    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 167        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 168        let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
 169
 170        self.config = ConfigStatus::VariableParsed(parsed_config);
 171
 172        Ok(())
 173    }
 174
    /// Computes the effective remote environment for the container by
    /// overlaying the config's `remoteEnv` — with `${containerEnv:KEY}`
    /// references resolved against `container_env` — on top of the
    /// container's own environment.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            // Round-trip the map through JSON so `${containerEnv:KEY}`
            // placeholders can be resolved with plain string substitution,
            // then parse the substituted text back into a map.
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // remoteEnv entries take precedence over the container's values.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
 207
 208    fn config_file(&self) -> PathBuf {
 209        self.config_directory.join(&self.file_name)
 210    }
 211
 212    fn dev_container(&self) -> &DevContainer {
 213        match &self.config {
 214            ConfigStatus::Deserialized(dev_container) => dev_container,
 215            ConfigStatus::VariableParsed(dev_container) => dev_container,
 216        }
 217    }
 218
 219    async fn dockerfile_location(&self) -> Option<PathBuf> {
 220        let dev_container = self.dev_container();
 221        match dev_container.build_type() {
 222            DevContainerBuildType::Image(_) => None,
 223            DevContainerBuildType::Dockerfile(build) => {
 224                Some(self.config_directory.join(&build.dockerfile))
 225            }
 226            DevContainerBuildType::DockerCompose => {
 227                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 228                    return None;
 229                };
 230                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 231                else {
 232                    return None;
 233                };
 234                main_service
 235                    .build
 236                    .and_then(|b| b.dockerfile)
 237                    .map(|dockerfile| self.config_directory.join(dockerfile))
 238            }
 239            DevContainerBuildType::None => None,
 240        }
 241    }
 242
 243    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
 244        let mut hasher = DefaultHasher::new();
 245        let prefix = match &self.dev_container().name {
 246            Some(name) => &safe_id_lower(name),
 247            None => "zed-dc",
 248        };
 249        let prefix = prefix.get(..6).unwrap_or(prefix);
 250
 251        dockerfile_build_path.hash(&mut hasher);
 252
 253        let hash = hasher.finish();
 254        format!("{}-{:x}-features", prefix, hash)
 255    }
 256
 257    /// Gets the base image from the devcontainer with the following precedence:
 258    /// - The devcontainer image if an image is specified
 259    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 260    /// - The image sourced in the docker-compose main service, if one is specified
 261    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 262    /// If no such image is available, return an error
 263    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 264        match self.dev_container().build_type() {
 265            DevContainerBuildType::Image(image) => {
 266                return Ok(image);
 267            }
 268            DevContainerBuildType::Dockerfile(build) => {
 269                let dockerfile_contents = self.expanded_dockerfile_content().await?;
 270                return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
 271                    || {
 272                        log::error!("Unable to find base image in Dockerfile");
 273                        DevContainerError::DevContainerParseFailed
 274                    },
 275                );
 276            }
 277            DevContainerBuildType::DockerCompose => {
 278                let docker_compose_manifest = self.docker_compose_manifest().await?;
 279                let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 280
 281                if let Some(_) = main_service
 282                    .build
 283                    .as_ref()
 284                    .and_then(|b| b.dockerfile.as_ref())
 285                {
 286                    let dockerfile_contents = self.expanded_dockerfile_content().await?;
 287                    return image_from_dockerfile(
 288                        dockerfile_contents,
 289                        &main_service.build.as_ref().and_then(|b| b.target.clone()),
 290                    )
 291                    .ok_or_else(|| {
 292                        log::error!("Unable to find base image in Dockerfile");
 293                        DevContainerError::DevContainerParseFailed
 294                    });
 295                }
 296                if let Some(image) = &main_service.image {
 297                    return Ok(image.to_string());
 298                }
 299
 300                log::error!("No valid base image found in docker-compose configuration");
 301                return Err(DevContainerError::DevContainerParseFailed);
 302            }
 303            DevContainerBuildType::None => {
 304                log::error!("Not a valid devcontainer config for build");
 305                return Err(DevContainerError::NotInValidProject);
 306            }
 307        }
 308    }
 309
    /// Downloads all configured dev container features from their OCI
    /// registries and writes the build context (feature contents, builtin
    /// env file, and extended Dockerfile) used to build the final image.
    ///
    /// Requires `parse_nonremote_vars` to have run; on success populates
    /// `self.root_image`, `self.features_build_info`, and `self.features`.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image the extended build starts from.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch directory so successive builds do not collide.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        // The generated Dockerfile path feeds the image tag hash, so the tag
        // is stable per build directory.
        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Builtin env consumed by feature install scripts inside the build.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor overrideFeatureInstallOrder when sequencing feature installs.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature mapped to `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Per-feature directory named `<id>_<index>` to keep install
            // order explicit and avoid collisions between same-id features.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // OCI fetch: token -> manifest -> first layer tarball.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature must ship a devcontainer-feature.json describing it.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also gets non-remote variable expansion.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the resolved option env, then a wrapper script that
            // sources it and runs the feature's installer.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit syntax is used unless this is a compose build on a client
        // without compose BuildKit support.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Base Dockerfile content, if this config references one on disk.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the (possibly targeted) final stage so the extended
        // Dockerfile can build on top of it.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 567
    /// Renders the extended Dockerfile: the (aliased) base content followed
    /// by stages that copy feature payloads into the image and run each
    /// feature's install layer.
    ///
    /// `use_buildkit` switches between BuildKit-style local COPY sources and
    /// a legacy intermediate stage for the feature content.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // updateRemoteUserUID defaults to true, but is meaningless on Windows
        // hosts, so it is forced off there.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One install layer per downloaded feature, in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's /etc/passwd entry, used
        // below to record the users' home directories in the builtin env.
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content is staged via an extra build stage.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 659
 660    fn build_merged_resources(
 661        &self,
 662        base_image: DockerInspect,
 663    ) -> Result<DockerBuildResources, DevContainerError> {
 664        let dev_container = match &self.config {
 665            ConfigStatus::Deserialized(_) => {
 666                log::error!(
 667                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 668                );
 669                return Err(DevContainerError::DevContainerParseFailed);
 670            }
 671            ConfigStatus::VariableParsed(dev_container) => dev_container,
 672        };
 673        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 674
 675        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 676
 677        mounts.append(&mut feature_mounts);
 678
 679        let privileged = dev_container.privileged.unwrap_or(false)
 680            || self.features.iter().any(|f| f.privileged());
 681
 682        let mut entrypoint_script_lines = vec![
 683            "echo Container started".to_string(),
 684            "trap \"exit 0\" 15".to_string(),
 685        ];
 686
 687        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 688            entrypoint_script_lines.push(entrypoint.clone());
 689        }
 690        entrypoint_script_lines.append(&mut vec![
 691            "exec \"$@\"".to_string(),
 692            "while sleep 1 & wait $!; do :; done".to_string(),
 693        ]);
 694
 695        Ok(DockerBuildResources {
 696            image: base_image,
 697            additional_mounts: mounts,
 698            privileged,
 699            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 700        })
 701    }
 702
 703    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 704        if let ConfigStatus::Deserialized(_) = &self.config {
 705            log::error!(
 706                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 707            );
 708            return Err(DevContainerError::DevContainerParseFailed);
 709        }
 710        let dev_container = self.dev_container();
 711        match dev_container.build_type() {
 712            DevContainerBuildType::Image(base_image) => {
 713                let built_docker_image = self.build_docker_image().await?;
 714
 715                let built_docker_image = self
 716                    .update_remote_user_uid(built_docker_image, &base_image)
 717                    .await?;
 718
 719                let resources = self.build_merged_resources(built_docker_image)?;
 720                Ok(DevContainerBuildResources::Docker(resources))
 721            }
 722            DevContainerBuildType::Dockerfile(_) => {
 723                let built_docker_image = self.build_docker_image().await?;
 724                let Some(features_build_info) = &self.features_build_info else {
 725                    log::error!(
 726                        "Can't attempt to build update UID dockerfile before initial docker build"
 727                    );
 728                    return Err(DevContainerError::DevContainerParseFailed);
 729                };
 730                let built_docker_image = self
 731                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 732                    .await?;
 733
 734                let resources = self.build_merged_resources(built_docker_image)?;
 735                Ok(DevContainerBuildResources::Docker(resources))
 736            }
 737            DevContainerBuildType::DockerCompose => {
 738                log::debug!("Using docker compose. Building extended compose files");
 739                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 740
 741                return Ok(DevContainerBuildResources::DockerCompose(
 742                    docker_compose_resources,
 743                ));
 744            }
 745            DevContainerBuildType::None => {
 746                return Err(DevContainerError::DevContainerParseFailed);
 747            }
 748        }
 749    }
 750
 751    async fn run_dev_container(
 752        &self,
 753        build_resources: DevContainerBuildResources,
 754    ) -> Result<DevContainerUp, DevContainerError> {
 755        let ConfigStatus::VariableParsed(_) = &self.config else {
 756            log::error!(
 757                "Variables have not been parsed; cannot proceed with running the dev container"
 758            );
 759            return Err(DevContainerError::DevContainerParseFailed);
 760        };
 761        let running_container = match build_resources {
 762            DevContainerBuildResources::DockerCompose(resources) => {
 763                self.run_docker_compose(resources).await?
 764            }
 765            DevContainerBuildResources::Docker(resources) => {
 766                self.run_docker_image(resources).await?
 767            }
 768        };
 769
 770        let remote_user = get_remote_user_from_config(&running_container, self)?;
 771        let remote_workspace_folder = get_remote_dir_from_config(
 772            &running_container,
 773            (&self.local_project_directory.display()).to_string(),
 774        )?;
 775
 776        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 777
 778        Ok(DevContainerUp {
 779            container_id: running_container.id,
 780            remote_user,
 781            remote_workspace_folder,
 782            extension_ids: self.extension_ids(),
 783            remote_env,
 784        })
 785    }
 786
 787    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 788        let dev_container = match &self.config {
 789            ConfigStatus::Deserialized(_) => {
 790                log::error!(
 791                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 792                );
 793                return Err(DevContainerError::DevContainerParseFailed);
 794            }
 795            ConfigStatus::VariableParsed(dev_container) => dev_container,
 796        };
 797        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 798            return Err(DevContainerError::DevContainerParseFailed);
 799        };
 800        let docker_compose_full_paths = docker_compose_files
 801            .iter()
 802            .map(|relative| self.config_directory.join(relative))
 803            .collect::<Vec<PathBuf>>();
 804
 805        let Some(config) = self
 806            .docker_client
 807            .get_docker_compose_config(&docker_compose_full_paths)
 808            .await?
 809        else {
 810            log::error!("Output could not deserialize into DockerComposeConfig");
 811            return Err(DevContainerError::DevContainerParseFailed);
 812        };
 813        Ok(DockerComposeResources {
 814            files: docker_compose_full_paths,
 815            config,
 816        })
 817    }
 818
    /// Builds the feature-extended image for the compose project's primary
    /// service and returns the compose resources extended with the generated
    /// override files.
    ///
    /// Flow: resolve the merged compose config, find the primary service,
    /// build its image with the features Dockerfile layered on top (via a
    /// temporary build-override compose file), remap the remote user's UID,
    /// then append a runtime override file (entrypoint/labels/ports/mounts).
    async fn build_and_extend_compose_files(
        &self,
    ) -> Result<DockerComposeResources, DevContainerError> {
        // The raw deserialized config may still contain ${...} placeholders;
        // only a variable-expanded config is usable here.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot build and extend compose files: features build info is not yet constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut docker_compose_resources = self.docker_compose_manifest().await?;
        let supports_buildkit = self.docker_client.supports_compose_buildkit();

        let (main_service_name, main_service) =
            find_primary_service(&docker_compose_resources, self)?;
        // NOTE(review): `.map(|b| b.dockerfile.as_ref()).is_some()` is true
        // whenever `build` is Some, even if `dockerfile` is None (compose
        // defaults the Dockerfile name when a `build:` section exists) —
        // confirm that treating any `build:` section as this branch is
        // intentional; otherwise this should be `.and_then(...)`.
        let (built_service_image, built_service_image_tag) = if main_service
            .build
            .as_ref()
            .map(|b| b.dockerfile.as_ref())
            .is_some()
        {
            // Without BuildKit there are no named build contexts, so the
            // feature content is staged via a throwaway scratch image instead.
            if !supports_buildkit {
                self.build_feature_content_image().await?;
            }

            let dockerfile_path = &features_build_info.dockerfile_path;

            let build_args = if !supports_buildkit {
                HashMap::from([
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            } else {
                HashMap::from([
                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            };

            // With BuildKit the feature content dir is mounted as a named
            // additional build context rather than baked into an image.
            let additional_contexts = if !supports_buildkit {
                None
            } else {
                Some(HashMap::from([(
                    "dev_containers_feature_content_source".to_string(),
                    features_build_info
                        .features_content_dir
                        .display()
                        .to_string(),
                )]))
            };

            // Override only the primary service: swap its image for the
            // features image tag and point its build at the generated
            // features Dockerfile.
            let build_override = DockerComposeConfig {
                name: None,
                services: HashMap::from([(
                    main_service_name.clone(),
                    DockerComposeService {
                        image: Some(features_build_info.image_tag.clone()),
                        entrypoint: None,
                        cap_add: None,
                        security_opt: None,
                        labels: None,
                        build: Some(DockerComposeServiceBuild {
                            context: Some(
                                main_service
                                    .build
                                    .as_ref()
                                    .and_then(|b| b.context.clone())
                                    .unwrap_or_else(|| {
                                        features_build_info.empty_context_dir.display().to_string()
                                    }),
                            ),
                            dockerfile: Some(dockerfile_path.display().to_string()),
                            target: Some("dev_containers_target_stage".to_string()),
                            args: Some(build_args),
                            additional_contexts,
                        }),
                        volumes: Vec::new(),
                        ..Default::default()
                    },
                )]),
                volumes: HashMap::new(),
            };

            // NOTE(review): assumes the "devcontainer-zed" temp dir already
            // exists — confirm it is created earlier in the pipeline.
            let temp_base = std::env::temp_dir().join("devcontainer-zed");
            let config_location = temp_base.join("docker_compose_build.json");

            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                log::error!("Error serializing docker compose runtime override: {e}");
                DevContainerError::DevContainerParseFailed
            })?;

            self.fs
                .write(&config_location, config_json.as_bytes())
                .await
                .map_err(|e| {
                    log::error!("Error writing the runtime override file: {e}");
                    DevContainerError::FilesystemError
                })?;

            docker_compose_resources.files.push(config_location);

            // Build with the override appended, then inspect the resulting
            // tagged image for its metadata.
            self.docker_client
                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                .await?;
            (
                self.docker_client
                    .inspect(&features_build_info.image_tag)
                    .await?,
                &features_build_info.image_tag,
            )
        } else if let Some(image) = &main_service.image {
            if dev_container
                .features
                .as_ref()
                .is_none_or(|features| features.is_empty())
            {
                // No features to layer: the declared image is used directly.
                (self.docker_client.inspect(image).await?, image)
            } else {
                // NOTE(review): this branch duplicates the Dockerfile branch
                // above almost verbatim (only the base-image build arg and
                // the build context differ) — a candidate for extracting a
                // shared helper.
                if !supports_buildkit {
                    self.build_feature_content_image().await?;
                }

                let dockerfile_path = &features_build_info.dockerfile_path;

                // Here the declared service image is the base image that the
                // features Dockerfile extends.
                let build_args = if !supports_buildkit {
                    HashMap::from([
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                } else {
                    HashMap::from([
                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                };

                let additional_contexts = if !supports_buildkit {
                    None
                } else {
                    Some(HashMap::from([(
                        "dev_containers_feature_content_source".to_string(),
                        features_build_info
                            .features_content_dir
                            .display()
                            .to_string(),
                    )]))
                };

                let build_override = DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        main_service_name.clone(),
                        DockerComposeService {
                            image: Some(features_build_info.image_tag.clone()),
                            entrypoint: None,
                            cap_add: None,
                            security_opt: None,
                            labels: None,
                            build: Some(DockerComposeServiceBuild {
                                context: Some(
                                    features_build_info.empty_context_dir.display().to_string(),
                                ),
                                dockerfile: Some(dockerfile_path.display().to_string()),
                                target: Some("dev_containers_target_stage".to_string()),
                                args: Some(build_args),
                                additional_contexts,
                            }),
                            volumes: Vec::new(),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                };

                let temp_base = std::env::temp_dir().join("devcontainer-zed");
                let config_location = temp_base.join("docker_compose_build.json");

                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                    log::error!("Error serializing docker compose runtime override: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;

                self.fs
                    .write(&config_location, config_json.as_bytes())
                    .await
                    .map_err(|e| {
                        log::error!("Error writing the runtime override file: {e}");
                        DevContainerError::FilesystemError
                    })?;

                docker_compose_resources.files.push(config_location);

                self.docker_client
                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                    .await?;

                (
                    self.docker_client
                        .inspect(&features_build_info.image_tag)
                        .await?,
                    &features_build_info.image_tag,
                )
            }
        } else {
            log::error!("Docker compose must have either image or dockerfile defined");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Align the remote user's UID/GID with the host user before use.
        let built_service_image = self
            .update_remote_user_uid(built_service_image, built_service_image_tag)
            .await?;

        let resources = self.build_merged_resources(built_service_image)?;

        // "service:<name>" network mode means forwarded ports must be
        // published on the named service, not the primary one.
        let network_mode = main_service.network_mode.as_ref();
        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
        let runtime_override_file = self
            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
            .await?;

        docker_compose_resources.files.push(runtime_override_file);

        Ok(docker_compose_resources)
    }
1060
1061    async fn write_runtime_override_file(
1062        &self,
1063        main_service_name: &str,
1064        network_mode_service: Option<&str>,
1065        resources: DockerBuildResources,
1066    ) -> Result<PathBuf, DevContainerError> {
1067        let config =
1068            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1069        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1070        let config_location = temp_base.join("docker_compose_runtime.json");
1071
1072        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1073            log::error!("Error serializing docker compose runtime override: {e}");
1074            DevContainerError::DevContainerParseFailed
1075        })?;
1076
1077        self.fs
1078            .write(&config_location, config_json.as_bytes())
1079            .await
1080            .map_err(|e| {
1081                log::error!("Error writing the runtime override file: {e}");
1082                DevContainerError::FilesystemError
1083            })?;
1084
1085        Ok(config_location)
1086    }
1087
1088    fn build_runtime_override(
1089        &self,
1090        main_service_name: &str,
1091        network_mode_service: Option<&str>,
1092        resources: DockerBuildResources,
1093    ) -> Result<DockerComposeConfig, DevContainerError> {
1094        let mut runtime_labels = HashMap::new();
1095
1096        if let Some(metadata) = &resources.image.config.labels.metadata {
1097            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1098                log::error!("Error serializing docker image metadata: {e}");
1099                DevContainerError::ContainerNotValid(resources.image.id.clone())
1100            })?;
1101
1102            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1103        }
1104
1105        for (k, v) in self.identifying_labels() {
1106            runtime_labels.insert(k.to_string(), v.to_string());
1107        }
1108
1109        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1110            .additional_mounts
1111            .iter()
1112            .filter_map(|mount| {
1113                if let Some(mount_type) = &mount.mount_type
1114                    && mount_type.to_lowercase() == "volume"
1115                    && let Some(source) = &mount.source
1116                {
1117                    Some((
1118                        source.clone(),
1119                        DockerComposeVolume {
1120                            name: source.clone(),
1121                        },
1122                    ))
1123                } else {
1124                    None
1125                }
1126            })
1127            .collect();
1128
1129        let volumes: Vec<MountDefinition> = resources
1130            .additional_mounts
1131            .iter()
1132            .map(|v| MountDefinition {
1133                source: v.source.clone(),
1134                target: v.target.clone(),
1135                mount_type: v.mount_type.clone(),
1136            })
1137            .collect();
1138
1139        let mut main_service = DockerComposeService {
1140            entrypoint: Some(vec![
1141                "/bin/sh".to_string(),
1142                "-c".to_string(),
1143                resources.entrypoint_script,
1144                "-".to_string(),
1145            ]),
1146            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1147            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1148            labels: Some(runtime_labels),
1149            volumes,
1150            privileged: Some(resources.privileged),
1151            ..Default::default()
1152        };
1153        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1154        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1155        if let Some(forward_ports) = &self.dev_container().forward_ports {
1156            let main_service_ports: Vec<String> = forward_ports
1157                .iter()
1158                .filter_map(|f| match f {
1159                    ForwardPort::Number(port) => Some(port.to_string()),
1160                    ForwardPort::String(port) => {
1161                        let parts: Vec<&str> = port.split(":").collect();
1162                        if parts.len() <= 1 {
1163                            Some(port.to_string())
1164                        } else if parts.len() == 2 {
1165                            if parts[0] == main_service_name {
1166                                Some(parts[1].to_string())
1167                            } else {
1168                                None
1169                            }
1170                        } else {
1171                            None
1172                        }
1173                    }
1174                })
1175                .collect();
1176            for port in main_service_ports {
1177                // If the main service uses a different service's network bridge, append to that service's ports instead
1178                if let Some(network_service_name) = network_mode_service {
1179                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1180                        service.ports.push(DockerComposeServicePort {
1181                            target: port.clone(),
1182                            published: port.clone(),
1183                            ..Default::default()
1184                        });
1185                    } else {
1186                        service_declarations.insert(
1187                            network_service_name.to_string(),
1188                            DockerComposeService {
1189                                ports: vec![DockerComposeServicePort {
1190                                    target: port.clone(),
1191                                    published: port.clone(),
1192                                    ..Default::default()
1193                                }],
1194                                ..Default::default()
1195                            },
1196                        );
1197                    }
1198                } else {
1199                    main_service.ports.push(DockerComposeServicePort {
1200                        target: port.clone(),
1201                        published: port.clone(),
1202                        ..Default::default()
1203                    });
1204                }
1205            }
1206            let other_service_ports: Vec<(&str, &str)> = forward_ports
1207                .iter()
1208                .filter_map(|f| match f {
1209                    ForwardPort::Number(_) => None,
1210                    ForwardPort::String(port) => {
1211                        let parts: Vec<&str> = port.split(":").collect();
1212                        if parts.len() != 2 {
1213                            None
1214                        } else {
1215                            if parts[0] == main_service_name {
1216                                None
1217                            } else {
1218                                Some((parts[0], parts[1]))
1219                            }
1220                        }
1221                    }
1222                })
1223                .collect();
1224            for (service_name, port) in other_service_ports {
1225                if let Some(service) = service_declarations.get_mut(service_name) {
1226                    service.ports.push(DockerComposeServicePort {
1227                        target: port.to_string(),
1228                        published: port.to_string(),
1229                        ..Default::default()
1230                    });
1231                } else {
1232                    service_declarations.insert(
1233                        service_name.to_string(),
1234                        DockerComposeService {
1235                            ports: vec![DockerComposeServicePort {
1236                                target: port.to_string(),
1237                                published: port.to_string(),
1238                                ..Default::default()
1239                            }],
1240                            ..Default::default()
1241                        },
1242                    );
1243                }
1244            }
1245        }
1246
1247        service_declarations.insert(main_service_name.to_string(), main_service);
1248        let new_docker_compose_config = DockerComposeConfig {
1249            name: None,
1250            services: service_declarations,
1251            volumes: config_volumes,
1252        };
1253
1254        Ok(new_docker_compose_config)
1255    }
1256
1257    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1258        let dev_container = match &self.config {
1259            ConfigStatus::Deserialized(_) => {
1260                log::error!(
1261                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1262                );
1263                return Err(DevContainerError::DevContainerParseFailed);
1264            }
1265            ConfigStatus::VariableParsed(dev_container) => dev_container,
1266        };
1267
1268        match dev_container.build_type() {
1269            DevContainerBuildType::Image(image_tag) => {
1270                let base_image = self.docker_client.inspect(&image_tag).await?;
1271                if dev_container
1272                    .features
1273                    .as_ref()
1274                    .is_none_or(|features| features.is_empty())
1275                {
1276                    log::debug!("No features to add. Using base image");
1277                    return Ok(base_image);
1278                }
1279            }
1280            DevContainerBuildType::Dockerfile(_) => {}
1281            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1282                return Err(DevContainerError::DevContainerParseFailed);
1283            }
1284        };
1285
1286        let mut command = self.create_docker_build()?;
1287
1288        let output = self
1289            .command_runner
1290            .run_command(&mut command)
1291            .await
1292            .map_err(|e| {
1293                log::error!("Error building docker image: {e}");
1294                DevContainerError::CommandFailed(command.get_program().display().to_string())
1295            })?;
1296
1297        if !output.status.success() {
1298            let stderr = String::from_utf8_lossy(&output.stderr);
1299            log::error!("docker buildx build failed: {stderr}");
1300            return Err(DevContainerError::CommandFailed(
1301                command.get_program().display().to_string(),
1302            ));
1303        }
1304
1305        // After a successful build, inspect the newly tagged image to get its metadata
1306        let Some(features_build_info) = &self.features_build_info else {
1307            log::error!("Features build info expected, but not created");
1308            return Err(DevContainerError::DevContainerParseFailed);
1309        };
1310        let image = self
1311            .docker_client
1312            .inspect(&features_build_info.image_tag)
1313            .await?;
1314
1315        Ok(image)
1316    }
1317
    /// No-op on Windows: UID/GID remapping is a Unix-host concern (aligning
    /// the container user's IDs with the host user's); the image is returned
    /// unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
    /// Rebuilds `image` on top of `base_image` so that the remote user's
    /// UID/GID match the host user's, avoiding permission mismatches on
    /// bind-mounted files.
    ///
    /// Skipped (image passed through unchanged) when features build info is
    /// absent, when `updateRemoteUserUID` is explicitly false, or when the
    /// remote user is root or purely numeric.
    #[cfg(not(target_os = "windows"))]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        let dev_container = self.dev_container();

        // Without features build info there is nowhere to stage the generated
        // Dockerfile, so skip the remap entirely.
        let Some(features_build_info) = &self.features_build_info else {
            return Ok(image);
        };

        // updateRemoteUserUID defaults to true per the devcontainers spec
        if dev_container.update_remote_user_uid == Some(false) {
            return Ok(image);
        }

        // root never needs remapping; a purely numeric user is an explicit
        // UID the author chose, so it is left alone as well.
        let remote_user = get_remote_user_from_config(&image, self)?;
        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
            return Ok(image);
        }

        // The user the final image should switch back to after the remap.
        let image_user = image
            .config
            .image_user
            .as_deref()
            .unwrap_or("root")
            .to_string();

        // Resolve the host UID via `id -u`. The exit status is not checked;
        // a failed command yields unparsable stdout and errors out below.
        let host_uid = Command::new("id")
            .arg("-u")
            .output()
            .await
            .map_err(|e| {
                log::error!("Failed to get host UID: {e}");
                DevContainerError::CommandFailed("id -u".to_string())
            })
            .and_then(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .trim()
                    .parse::<u32>()
                    .map_err(|e| {
                        log::error!("Failed to parse host UID: {e}");
                        DevContainerError::CommandFailed("id -u".to_string())
                    })
            })?;

        // Same for the host GID via `id -g`.
        let host_gid = Command::new("id")
            .arg("-g")
            .output()
            .await
            .map_err(|e| {
                log::error!("Failed to get host GID: {e}");
                DevContainerError::CommandFailed("id -g".to_string())
            })
            .and_then(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .trim()
                    .parse::<u32>()
                    .map_err(|e| {
                        log::error!("Failed to parse host GID: {e}");
                        DevContainerError::CommandFailed("id -g".to_string())
                    })
            })?;

        let dockerfile_content = self.generate_update_uid_dockerfile();

        // Stage the Dockerfile inside the features content dir so it lives
        // alongside the rest of the generated build inputs.
        let dockerfile_path = features_build_info
            .features_content_dir
            .join("updateUID.Dockerfile");
        self.fs
            .write(&dockerfile_path, dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write updateUID Dockerfile: {e}");
                DevContainerError::FilesystemError
            })?;

        // Reuse the features image tag so downstream steps keep referring to
        // the same tag after the remap layer is added.
        let updated_image_tag = features_build_info.image_tag.clone();

        let mut command = Command::new(self.docker_client.docker_cli());
        command.args(["build"]);
        command.args(["-f", &dockerfile_path.display().to_string()]);
        command.args(["-t", &updated_image_tag]);
        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
        command.arg(features_build_info.empty_context_dir.display().to_string());

        let output = self
            .command_runner
            .run_command(&mut command)
            .await
            .map_err(|e| {
                log::error!("Error building UID update image: {e}");
                DevContainerError::CommandFailed(command.get_program().display().to_string())
            })?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            log::error!("UID update build failed: {stderr}");
            return Err(DevContainerError::CommandFailed(
                command.get_program().display().to_string(),
            ));
        }

        // Return fresh metadata for the remapped image.
        self.docker_client.inspect(&updated_image_tag).await
    }
1436
1437    #[cfg(not(target_os = "windows"))]
    /// Generates a Dockerfile that aligns the container user's UID/GID with the
    /// host user, then appends feature/container `ENV` layers.
    ///
    /// The embedded shell script (run as root) parses `/etc/passwd` and
    /// `/etc/group` for the remote user's current UID/GID and home folder, and
    /// only rewrites them when they differ from the requested values and the
    /// target UID is not already taken by another user. A group that already
    /// owns the target GID is first moved to a free GID found by counting down
    /// from 65532. After the rewrite, the user's home folder is chowned and the
    /// build switches back to `IMAGE_USER`. Finally, `/etc/profile` is patched
    /// so a hard-coded `PATH=` line preserves any pre-existing `PATH`.
    fn generate_update_uid_dockerfile(&self) -> String {
        let mut dockerfile = r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#.to_string();
        // Append each feature's container-env layer after the base script.
        // NOTE(review): the `ENV` lines below are concatenated directly after the
        // last feature layer with no separating newline — this relies on
        // `generate_dockerfile_env` output ending in a newline; confirm.
        for feature in &self.features {
            let container_env_layer = feature.generate_dockerfile_env();
            dockerfile = format!("{dockerfile}\n{container_env_layer}");
        }

        // `containerEnv` entries from the devcontainer config become ENV layers.
        if let Some(env) = &self.dev_container().container_env {
            for (key, value) in env {
                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
            }
        }
        dockerfile
    }
1490
1491    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1492        let Some(features_build_info) = &self.features_build_info else {
1493            log::error!("Features build info not available for building feature content image");
1494            return Err(DevContainerError::DevContainerParseFailed);
1495        };
1496        let features_content_dir = &features_build_info.features_content_dir;
1497
1498        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1499        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1500
1501        self.fs
1502            .write(&dockerfile_path, dockerfile_content.as_bytes())
1503            .await
1504            .map_err(|e| {
1505                log::error!("Failed to write feature content Dockerfile: {e}");
1506                DevContainerError::FilesystemError
1507            })?;
1508
1509        let mut command = Command::new(self.docker_client.docker_cli());
1510        command.args([
1511            "build",
1512            "-t",
1513            "dev_container_feature_content_temp",
1514            "-f",
1515            &dockerfile_path.display().to_string(),
1516            &features_content_dir.display().to_string(),
1517        ]);
1518
1519        let output = self
1520            .command_runner
1521            .run_command(&mut command)
1522            .await
1523            .map_err(|e| {
1524                log::error!("Error building feature content image: {e}");
1525                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1526            })?;
1527
1528        if !output.status.success() {
1529            let stderr = String::from_utf8_lossy(&output.stderr);
1530            log::error!("Feature content image build failed: {stderr}");
1531            return Err(DevContainerError::CommandFailed(
1532                self.docker_client.docker_cli(),
1533            ));
1534        }
1535
1536        Ok(())
1537    }
1538
1539    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1540        let dev_container = match &self.config {
1541            ConfigStatus::Deserialized(_) => {
1542                log::error!(
1543                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1544                );
1545                return Err(DevContainerError::DevContainerParseFailed);
1546            }
1547            ConfigStatus::VariableParsed(dev_container) => dev_container,
1548        };
1549
1550        let Some(features_build_info) = &self.features_build_info else {
1551            log::error!(
1552                "Cannot create docker build command; features build info has not been constructed"
1553            );
1554            return Err(DevContainerError::DevContainerParseFailed);
1555        };
1556        let mut command = Command::new(self.docker_client.docker_cli());
1557
1558        command.args(["buildx", "build"]);
1559
1560        // --load is short for --output=docker, loading the built image into the local docker images
1561        command.arg("--load");
1562
1563        // BuildKit build context: provides the features content directory as a named context
1564        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1565        command.args([
1566            "--build-context",
1567            &format!(
1568                "dev_containers_feature_content_source={}",
1569                features_build_info.features_content_dir.display()
1570            ),
1571        ]);
1572
1573        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1574        if let Some(build_image) = &features_build_info.build_image {
1575            command.args([
1576                "--build-arg",
1577                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1578            ]);
1579        } else {
1580            command.args([
1581                "--build-arg",
1582                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1583            ]);
1584        }
1585
1586        command.args([
1587            "--build-arg",
1588            &format!(
1589                "_DEV_CONTAINERS_IMAGE_USER={}",
1590                self.root_image
1591                    .as_ref()
1592                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1593                    .unwrap_or(&"root".to_string())
1594            ),
1595        ]);
1596
1597        command.args([
1598            "--build-arg",
1599            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1600        ]);
1601
1602        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1603            for (key, value) in args {
1604                command.args(["--build-arg", &format!("{}={}", key, value)]);
1605            }
1606        }
1607
1608        command.args(["--target", "dev_containers_target_stage"]);
1609
1610        command.args([
1611            "-f",
1612            &features_build_info.dockerfile_path.display().to_string(),
1613        ]);
1614
1615        command.args(["-t", &features_build_info.image_tag]);
1616
1617        if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
1618            command.arg(self.config_directory.display().to_string());
1619        } else {
1620            // Use an empty folder as the build context to avoid pulling in unneeded files.
1621            // The actual feature content is supplied via the BuildKit build context above.
1622            command.arg(features_build_info.empty_context_dir.display().to_string());
1623        }
1624
1625        Ok(command)
1626    }
1627
1628    async fn run_docker_compose(
1629        &self,
1630        resources: DockerComposeResources,
1631    ) -> Result<DockerInspect, DevContainerError> {
1632        let mut command = Command::new(self.docker_client.docker_cli());
1633        command.args(&["compose", "--project-name", &self.project_name()]);
1634        for docker_compose_file in resources.files {
1635            command.args(&["-f", &docker_compose_file.display().to_string()]);
1636        }
1637        command.args(&["up", "-d"]);
1638
1639        let output = self
1640            .command_runner
1641            .run_command(&mut command)
1642            .await
1643            .map_err(|e| {
1644                log::error!("Error running docker compose up: {e}");
1645                DevContainerError::CommandFailed(command.get_program().display().to_string())
1646            })?;
1647
1648        if !output.status.success() {
1649            let stderr = String::from_utf8_lossy(&output.stderr);
1650            log::error!("Non-success status from docker compose up: {}", stderr);
1651            return Err(DevContainerError::CommandFailed(
1652                command.get_program().display().to_string(),
1653            ));
1654        }
1655
1656        if let Some(docker_ps) = self.check_for_existing_container().await? {
1657            log::debug!("Found newly created dev container");
1658            return self.docker_client.inspect(&docker_ps.id).await;
1659        }
1660
1661        log::error!("Could not find existing container after docker compose up");
1662
1663        Err(DevContainerError::DevContainerParseFailed)
1664    }
1665
1666    async fn run_docker_image(
1667        &self,
1668        build_resources: DockerBuildResources,
1669    ) -> Result<DockerInspect, DevContainerError> {
1670        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1671
1672        let output = self
1673            .command_runner
1674            .run_command(&mut docker_run_command)
1675            .await
1676            .map_err(|e| {
1677                log::error!("Error running docker run: {e}");
1678                DevContainerError::CommandFailed(
1679                    docker_run_command.get_program().display().to_string(),
1680                )
1681            })?;
1682
1683        if !output.status.success() {
1684            let std_err = String::from_utf8_lossy(&output.stderr);
1685            log::error!("Non-success status from docker run. StdErr: {std_err}");
1686            return Err(DevContainerError::CommandFailed(
1687                docker_run_command.get_program().display().to_string(),
1688            ));
1689        }
1690
1691        log::debug!("Checking for container that was started");
1692        let Some(docker_ps) = self.check_for_existing_container().await? else {
1693            log::error!("Could not locate container just created");
1694            return Err(DevContainerError::DevContainerParseFailed);
1695        };
1696        self.docker_client.inspect(&docker_ps.id).await
1697    }
1698
1699    fn local_workspace_folder(&self) -> String {
1700        self.local_project_directory.display().to_string()
1701    }
1702    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1703        self.local_project_directory
1704            .file_name()
1705            .map(|f| f.display().to_string())
1706            .ok_or(DevContainerError::DevContainerParseFailed)
1707    }
1708
1709    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1710        self.dev_container()
1711            .workspace_folder
1712            .as_ref()
1713            .map(|folder| PathBuf::from(folder))
1714            .or(Some(
1715                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1716            ))
1717            .ok_or(DevContainerError::DevContainerParseFailed)
1718    }
1719    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1720        self.remote_workspace_folder().and_then(|f| {
1721            f.file_name()
1722                .map(|file_name| file_name.display().to_string())
1723                .ok_or(DevContainerError::DevContainerParseFailed)
1724        })
1725    }
1726
1727    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1728        if let Some(mount) = &self.dev_container().workspace_mount {
1729            return Ok(mount.clone());
1730        }
1731        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1732            return Err(DevContainerError::DevContainerParseFailed);
1733        };
1734
1735        Ok(MountDefinition {
1736            source: Some(self.local_workspace_folder()),
1737            target: format!("/workspaces/{}", project_directory_name.display()),
1738            mount_type: None,
1739        })
1740    }
1741
1742    fn create_docker_run_command(
1743        &self,
1744        build_resources: DockerBuildResources,
1745    ) -> Result<Command, DevContainerError> {
1746        let remote_workspace_mount = self.remote_workspace_mount()?;
1747
1748        let docker_cli = self.docker_client.docker_cli();
1749        let mut command = Command::new(&docker_cli);
1750
1751        command.arg("run");
1752
1753        if build_resources.privileged {
1754            command.arg("--privileged");
1755        }
1756
1757        let run_args = match &self.dev_container().run_args {
1758            Some(run_args) => run_args,
1759            None => &Vec::new(),
1760        };
1761
1762        for arg in run_args {
1763            command.arg(arg);
1764        }
1765
1766        let run_if_missing = {
1767            |arg_name: &str, arg: &str, command: &mut Command| {
1768                if !run_args
1769                    .iter()
1770                    .any(|arg| arg.strip_prefix(arg_name).is_some())
1771                {
1772                    command.arg(arg);
1773                }
1774            }
1775        };
1776
1777        if &docker_cli == "podman" {
1778            run_if_missing(
1779                "--security-opt",
1780                "--security-opt=label=disable",
1781                &mut command,
1782            );
1783            run_if_missing("--userns", "--userns=keep-id", &mut command);
1784        }
1785
1786        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
1787        command.arg("-d");
1788        command.arg("--mount");
1789        command.arg(remote_workspace_mount.to_string());
1790
1791        for mount in &build_resources.additional_mounts {
1792            command.arg("--mount");
1793            command.arg(mount.to_string());
1794        }
1795
1796        for (key, val) in self.identifying_labels() {
1797            command.arg("-l");
1798            command.arg(format!("{}={}", key, val));
1799        }
1800
1801        if let Some(metadata) = &build_resources.image.config.labels.metadata {
1802            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1803                log::error!("Problem serializing image metadata: {e}");
1804                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
1805            })?;
1806            command.arg("-l");
1807            command.arg(format!(
1808                "{}={}",
1809                "devcontainer.metadata", serialized_metadata
1810            ));
1811        }
1812
1813        if let Some(forward_ports) = &self.dev_container().forward_ports {
1814            for port in forward_ports {
1815                if let ForwardPort::Number(port_number) = port {
1816                    command.arg("-p");
1817                    command.arg(format!("{port_number}:{port_number}"));
1818                }
1819            }
1820        }
1821        for app_port in &self.dev_container().app_port {
1822            command.arg("-p");
1823            command.arg(app_port);
1824        }
1825
1826        command.arg("--entrypoint");
1827        command.arg("/bin/sh");
1828        command.arg(&build_resources.image.id);
1829        command.arg("-c");
1830
1831        command.arg(build_resources.entrypoint_script);
1832        command.arg("-");
1833
1834        Ok(command)
1835    }
1836
1837    fn extension_ids(&self) -> Vec<String> {
1838        self.dev_container()
1839            .customizations
1840            .as_ref()
1841            .map(|c| c.zed.extensions.clone())
1842            .unwrap_or_default()
1843    }
1844
1845    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1846        self.run_initialize_commands().await?;
1847
1848        self.download_feature_and_dockerfile_resources().await?;
1849
1850        let build_resources = self.build_resources().await?;
1851
1852        let devcontainer_up = self.run_dev_container(build_resources).await?;
1853
1854        self.run_remote_scripts(&devcontainer_up, true).await?;
1855
1856        Ok(devcontainer_up)
1857    }
1858
    /// Runs the devcontainer lifecycle scripts inside the container via docker exec.
    ///
    /// For a newly created container (`new_container == true`) this runs, in
    /// order: `onCreateCommand` and `updateContentCommand` as root, then
    /// `postCreateCommand` and `postStartCommand` as the remote user.
    /// `postAttachCommand` runs on every call, new container or not.
    ///
    /// NOTE(review): the devcontainer spec runs `postStartCommand` on every
    /// container start, but here it only runs when `new_container` is true —
    /// confirm whether restarts are handled elsewhere.
    ///
    /// Fails with `DevContainerScriptsFailed` if variable expansion has not yet
    /// been performed on the config.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // Create-time scripts; the first two phases run as root.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // The remaining create-time phases run as the resolved remote user.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs unconditionally, for both new and existing containers.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1946
1947    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1948        let ConfigStatus::VariableParsed(config) = &self.config else {
1949            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1950            return Err(DevContainerError::DevContainerParseFailed);
1951        };
1952
1953        if let Some(initialize_command) = &config.initialize_command {
1954            log::debug!("Running initialize command");
1955            initialize_command
1956                .run(&self.command_runner, &self.local_project_directory)
1957                .await
1958        } else {
1959            log::warn!("No initialize command found");
1960            Ok(())
1961        }
1962    }
1963
1964    async fn check_for_existing_devcontainer(
1965        &self,
1966    ) -> Result<Option<DevContainerUp>, DevContainerError> {
1967        if let Some(docker_ps) = self.check_for_existing_container().await? {
1968            log::debug!("Dev container already found. Proceeding with it");
1969
1970            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1971
1972            if !docker_inspect.is_running() {
1973                log::debug!("Container not running. Will attempt to start, and then proceed");
1974                self.docker_client.start_container(&docker_ps.id).await?;
1975            }
1976
1977            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1978
1979            let remote_folder = get_remote_dir_from_config(
1980                &docker_inspect,
1981                (&self.local_project_directory.display()).to_string(),
1982            )?;
1983
1984            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1985
1986            let dev_container_up = DevContainerUp {
1987                container_id: docker_ps.id,
1988                remote_user: remote_user,
1989                remote_workspace_folder: remote_folder,
1990                extension_ids: self.extension_ids(),
1991                remote_env,
1992            };
1993
1994            self.run_remote_scripts(&dev_container_up, false).await?;
1995
1996            Ok(Some(dev_container_up))
1997        } else {
1998            log::debug!("Existing container not found.");
1999
2000            Ok(None)
2001        }
2002    }
2003
2004    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2005        self.docker_client
2006            .find_process_by_filters(
2007                self.identifying_labels()
2008                    .iter()
2009                    .map(|(k, v)| format!("label={k}={v}"))
2010                    .collect(),
2011            )
2012            .await
2013    }
2014
2015    fn project_name(&self) -> String {
2016        if let Some(name) = &self.dev_container().name {
2017            safe_id_lower(name)
2018        } else {
2019            let alternate_name = &self
2020                .local_workspace_base_name()
2021                .unwrap_or(self.local_workspace_folder());
2022            safe_id_lower(alternate_name)
2023        }
2024    }
2025
    /// Loads the project's Dockerfile and expands `${VAR}` references using the
    /// devcontainer config's `build.args` and the Dockerfile's own `ARG`
    /// defaults.
    ///
    /// Processing is line-by-line and order-dependent: substitutions from
    /// config args run first (they take precedence), then substitutions from
    /// `ARG` defaults collected from earlier lines. Each `ARG` line then
    /// contributes its `key=value` pairs for use on subsequent lines. Only the
    /// `${VAR}` form is expanded, not bare `$VAR`.
    ///
    /// Errors when called on an image-type config (no Dockerfile) or when the
    /// Dockerfile cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // `ARG` defaults discovered so far, applied to later lines only.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches `key=` tokens within an ARG directive (start-of-string or
        // whitespace delimited).
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            // Collect defaults declared on this (already-substituted) ARG line.
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // A value runs from the end of its `key=` up to the next
                    // `key=` token (or end of line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2084}
2085
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference
/// implementation (cli/src/spec-node/containerFeatures.ts). Consumed by
/// `create_docker_build` and `build_feature_content_image` via the manifest's
/// `features_build_info` field.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` selects the `dev_container_auto_added_stage_label` stage instead.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2104
2105pub(crate) async fn read_devcontainer_configuration(
2106    config: DevContainerConfig,
2107    context: &DevContainerContext,
2108    environment: HashMap<String, String>,
2109) -> Result<DevContainer, DevContainerError> {
2110    let docker = if context.use_podman {
2111        Docker::new("podman")
2112    } else {
2113        Docker::new("docker")
2114    };
2115    let mut dev_container = DevContainerManifest::new(
2116        context,
2117        environment,
2118        Arc::new(docker),
2119        Arc::new(DefaultCommandRunner::new()),
2120        config,
2121        &context.project_directory.as_ref(),
2122    )
2123    .await?;
2124    dev_container.parse_nonremote_vars()?;
2125    Ok(dev_container.dev_container().clone())
2126}
2127
2128pub(crate) async fn spawn_dev_container(
2129    context: &DevContainerContext,
2130    environment: HashMap<String, String>,
2131    config: DevContainerConfig,
2132    local_project_path: &Path,
2133) -> Result<DevContainerUp, DevContainerError> {
2134    let docker = if context.use_podman {
2135        Docker::new("podman")
2136    } else {
2137        Docker::new("docker")
2138    };
2139    let mut devcontainer_manifest = DevContainerManifest::new(
2140        context,
2141        environment,
2142        Arc::new(docker),
2143        Arc::new(DefaultCommandRunner::new()),
2144        config,
2145        local_project_path,
2146    )
2147    .await?;
2148
2149    devcontainer_manifest.parse_nonremote_vars()?;
2150
2151    log::debug!("Checking for existing container");
2152    if let Some(devcontainer) = devcontainer_manifest
2153        .check_for_existing_devcontainer()
2154        .await?
2155    {
2156        Ok(devcontainer)
2157    } else {
2158        log::debug!("Existing container not found. Building");
2159
2160        devcontainer_manifest.build_and_run().await
2161    }
2162}
2163
/// Inputs for starting a dev container with a plain `docker run`
/// (the non-compose path).
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data of the image to run; its metadata label is propagated
    /// onto the container.
    image: DockerInspect,
    /// Mounts added on top of the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    /// When true, `--privileged` is passed to `docker run`.
    privileged: bool,
    /// Script handed to `/bin/sh -c` as the container entrypoint.
    entrypoint_script: String,
}
2171
/// The two ways a dev container can be brought up: through docker compose or a
/// direct `docker run`.
#[derive(Debug)]
enum DevContainerBuildResources {
    /// Compose files plus their merged configuration.
    DockerCompose(DockerComposeResources),
    /// Image, mounts, and entrypoint for a direct `docker run`.
    Docker(DockerBuildResources),
}
2177
2178fn find_primary_service(
2179    docker_compose: &DockerComposeResources,
2180    devcontainer: &DevContainerManifest,
2181) -> Result<(String, DockerComposeService), DevContainerError> {
2182    let Some(service_name) = &devcontainer.dev_container().service else {
2183        return Err(DevContainerError::DevContainerParseFailed);
2184    };
2185
2186    match docker_compose.config.services.get(service_name) {
2187        Some(service) => Ok((service_name.clone(), service.clone())),
2188        None => Err(DevContainerError::DevContainerParseFailed),
2189    }
2190}
2191
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): not referenced within this portion of the file — presumably
/// consumed by the generated Dockerfile.extended; confirm at its use sites.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2195
/// Escapes regex special characters in a string.
///
/// Every character with a special meaning in a regular expression is prefixed
/// with a backslash; all other characters pass through unchanged.
fn escape_regex_chars(input: &str) -> String {
    // Worst case every character needs escaping, so reserve double the length.
    let mut escaped = String::with_capacity(input.len() * 2);
    for ch in input.chars() {
        if matches!(
            ch,
            '.' | '*' | '+' | '?' | '^' | '$' | '{' | '}' | '(' | ')' | '|' | '[' | ']' | '\\'
        ) {
            escaped.push('\\');
        }
        escaped.push(ch);
    }
    escaped
}
2207
/// Extracts the short feature ID from a full feature reference string.
///
/// Strips a trailing `@digest` or `:version` suffix (a `:` only counts as a
/// version separator when it appears after the last `/`), then takes the last
/// path segment.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    let base = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    base.rsplit('/').next().unwrap_or(base)
}
2231
2232/// Generates a shell command that looks up a user's passwd entry.
2233///
2234/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2235/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2236fn get_ent_passwd_shell_command(user: &str) -> String {
2237    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2238    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2239    format!(
2240        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2241        shell = escaped_for_shell,
2242        re = escaped_for_regex,
2243    )
2244}
2245
2246/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2247///
2248/// Features listed in the override come first (in the specified order), followed
2249/// by any remaining features sorted lexicographically by their full reference ID.
2250fn resolve_feature_order<'a>(
2251    features: &'a HashMap<String, FeatureOptions>,
2252    override_order: &Option<Vec<String>>,
2253) -> Vec<(&'a String, &'a FeatureOptions)> {
2254    if let Some(order) = override_order {
2255        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2256        for ordered_id in order {
2257            if let Some((key, options)) = features.get_key_value(ordered_id) {
2258                ordered.push((key, options));
2259            }
2260        }
2261        let mut remaining: Vec<_> = features
2262            .iter()
2263            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2264            .collect();
2265        remaining.sort_by_key(|(id, _)| id.as_str());
2266        ordered.extend(remaining);
2267        ordered
2268    } else {
2269        let mut entries: Vec<_> = features.iter().collect();
2270        entries.sort_by_key(|(id, _)| id.as_str());
2271        entries
2272    }
2273}
2274
2275/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
2276///
2277/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
2278/// `containerFeaturesConfiguration.ts`.
2279fn generate_install_wrapper(
2280    feature_ref: &str,
2281    feature_id: &str,
2282    env_variables: &str,
2283) -> Result<String, DevContainerError> {
2284    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
2285        log::error!("Error escaping feature ref {feature_ref}: {e}");
2286        DevContainerError::DevContainerParseFailed
2287    })?;
2288    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
2289        log::error!("Error escaping feature {feature_id}: {e}");
2290        DevContainerError::DevContainerParseFailed
2291    })?;
2292    let options_indented: String = env_variables
2293        .lines()
2294        .filter(|l| !l.is_empty())
2295        .map(|l| format!("    {}", l))
2296        .collect::<Vec<_>>()
2297        .join("\n");
2298    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
2299        log::error!("Error escaping options {options_indented}: {e}");
2300        DevContainerError::DevContainerParseFailed
2301    })?;
2302
2303    let script = format!(
2304        r#"#!/bin/sh
2305set -e
2306
2307on_exit () {{
2308    [ $? -eq 0 ] && exit
2309    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
2310}}
2311
2312trap on_exit EXIT
2313
2314echo ===========================================================================
2315echo 'Feature       : {escaped_name}'
2316echo 'Id            : {escaped_id}'
2317echo 'Options       :'
2318echo {escaped_options}
2319echo ===========================================================================
2320
2321set -a
2322. ../devcontainer-features.builtin.env
2323. ./devcontainer-features.env
2324set +a
2325
2326chmod +x ./install.sh
2327./install.sh
2328"#
2329    );
2330
2331    Ok(script)
2332}
2333
2334fn dockerfile_inject_alias(
2335    dockerfile_content: &str,
2336    alias: &str,
2337    build_target: Option<String>,
2338) -> String {
2339    match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2340        Some(target) => format!(
2341            r#"{dockerfile_content}
2342FROM {target} AS {alias}"#
2343        ),
2344        None => dockerfile_content.to_string(),
2345    }
2346}
2347
/// Resolves the image of the last matching `FROM` instruction in a Dockerfile.
///
/// With `target = None`, returns the image of the last `FROM` line; with
/// `target = Some(stage)`, returns the image of the last `FROM … AS <stage>`
/// line (stage names compared case-insensitively). Returns `None` when no
/// `FROM` line matches.
///
/// Fixes over the previous version: `FROM` is matched as a whole token,
/// case-insensitively and after leading whitespace (Dockerfile instructions are
/// case-insensitive), so `from ubuntu` is found and `FROMAGE …` is not;
/// tokens are split on arbitrary whitespace rather than single spaces; and
/// `FROM` flags such as `--platform=…` are skipped when picking the image.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    // Parses one line into (image, optional stage alias) if it is a FROM
    // instruction; returns None for any other line.
    fn parse_from_line(line: &str) -> Option<(String, Option<String>)> {
        let mut tokens = line.split_whitespace();
        if !tokens.next()?.eq_ignore_ascii_case("FROM") {
            return None;
        }
        // Skip FROM flags (e.g. --platform=linux/amd64) to reach the image.
        let image = tokens.find(|token| !token.starts_with("--"))?.to_string();
        let alias = match (tokens.next(), tokens.next()) {
            (Some(keyword), Some(name)) if keyword.eq_ignore_ascii_case("as") => {
                Some(name.to_string())
            }
            _ => None,
        };
        Some((image, alias))
    }

    dockerfile_contents
        .lines()
        .filter_map(parse_from_line)
        .filter(|(_, alias)| match (target, alias) {
            // Named target: only stages whose alias matches (ignoring case).
            (Some(want), Some(alias)) => alias.to_lowercase() == want.to_lowercase(),
            (Some(_), None) => false,
            // No target: every FROM line is a candidate; the last one wins.
            (None, _) => true,
        })
        .last()
        .map(|(image, _)| image)
}
2373
2374// Container user things
2375// This should come from spec - see the docs
2376fn get_remote_user_from_config(
2377    docker_config: &DockerInspect,
2378    devcontainer: &DevContainerManifest,
2379) -> Result<String, DevContainerError> {
2380    if let DevContainer {
2381        remote_user: Some(user),
2382        ..
2383    } = &devcontainer.dev_container()
2384    {
2385        return Ok(user.clone());
2386    }
2387    if let Some(metadata) = &docker_config.config.labels.metadata {
2388        for metadatum in metadata {
2389            if let Some(remote_user) = metadatum.get("remoteUser") {
2390                if let Some(remote_user_str) = remote_user.as_str() {
2391                    return Ok(remote_user_str.to_string());
2392                }
2393            }
2394        }
2395    }
2396    if let Some(image_user) = &docker_config.config.image_user {
2397        if !image_user.is_empty() {
2398            return Ok(image_user.to_string());
2399        }
2400    }
2401    Ok("root".to_string())
2402}
2403
2404// This should come from spec - see the docs
2405fn get_container_user_from_config(
2406    docker_config: &DockerInspect,
2407    devcontainer: &DevContainerManifest,
2408) -> Result<String, DevContainerError> {
2409    if let Some(user) = &devcontainer.dev_container().container_user {
2410        return Ok(user.to_string());
2411    }
2412    if let Some(metadata) = &docker_config.config.labels.metadata {
2413        for metadatum in metadata {
2414            if let Some(container_user) = metadatum.get("containerUser") {
2415                if let Some(container_user_str) = container_user.as_str() {
2416                    return Ok(container_user_str.to_string());
2417                }
2418            }
2419        }
2420    }
2421    if let Some(image_user) = &docker_config.config.image_user {
2422        return Ok(image_user.to_string());
2423    }
2424
2425    Ok("root".to_string())
2426}
2427
2428#[cfg(test)]
2429mod test {
2430    use std::{
2431        collections::HashMap,
2432        ffi::OsStr,
2433        path::PathBuf,
2434        process::{ExitStatus, Output},
2435        sync::{Arc, Mutex},
2436    };
2437
2438    use async_trait::async_trait;
2439    use fs::{FakeFs, Fs};
2440    use gpui::{AppContext, TestAppContext};
2441    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2442    use project::{
2443        ProjectEnvironment,
2444        worktree_store::{WorktreeIdCounter, WorktreeStore},
2445    };
2446    use serde_json_lenient::Value;
2447    use util::{command::Command, paths::SanitizedPath};
2448
2449    #[cfg(not(target_os = "windows"))]
2450    use crate::docker::DockerComposeServicePort;
2451    use crate::{
2452        DevContainerConfig, DevContainerContext,
2453        command_json::CommandRunner,
2454        devcontainer_api::DevContainerError,
2455        devcontainer_json::MountDefinition,
2456        devcontainer_manifest::{
2457            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2458            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2459            image_from_dockerfile,
2460        },
2461        docker::{
2462            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2463            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2464            DockerPs,
2465        },
2466        oci::TokenResponse,
2467    };
2468    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2469
2470    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2471        let buffer = futures::io::Cursor::new(Vec::new());
2472        let mut builder = async_tar::Builder::new(buffer);
2473        for (file_name, content) in content {
2474            if content.is_empty() {
2475                let mut header = async_tar::Header::new_gnu();
2476                header.set_size(0);
2477                header.set_mode(0o755);
2478                header.set_entry_type(async_tar::EntryType::Directory);
2479                header.set_cksum();
2480                builder
2481                    .append_data(&mut header, file_name, &[] as &[u8])
2482                    .await
2483                    .unwrap();
2484            } else {
2485                let data = content.as_bytes();
2486                let mut header = async_tar::Header::new_gnu();
2487                header.set_size(data.len() as u64);
2488                header.set_mode(0o755);
2489                header.set_entry_type(async_tar::EntryType::Regular);
2490                header.set_cksum();
2491                builder
2492                    .append_data(&mut header, file_name, data)
2493                    .await
2494                    .unwrap();
2495            }
2496        }
2497        let buffer = builder.into_inner().await.unwrap();
2498        buffer.into_inner()
2499    }
2500
2501    fn test_project_filename() -> String {
2502        PathBuf::from(TEST_PROJECT_PATH)
2503            .file_name()
2504            .expect("is valid")
2505            .display()
2506            .to_string()
2507    }
2508
2509    async fn init_devcontainer_config(
2510        fs: &Arc<FakeFs>,
2511        devcontainer_contents: &str,
2512    ) -> DevContainerConfig {
2513        fs.insert_tree(
2514            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2515            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2516        )
2517        .await;
2518
2519        DevContainerConfig::default_config()
2520    }
2521
    /// Handles to the fake collaborators wired into a manifest under test,
    /// returned so individual tests can seed or inspect them.
    struct TestDependencies {
        // In-memory filesystem holding the devcontainer fixture.
        fs: Arc<FakeFs>,
        // Underscore-prefixed: kept alive but not accessed by tests directly.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2528
2529    async fn init_default_devcontainer_manifest(
2530        cx: &mut TestAppContext,
2531        devcontainer_contents: &str,
2532    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2533        let fs = FakeFs::new(cx.executor());
2534        let http_client = fake_http_client();
2535        let command_runner = Arc::new(TestCommandRunner::new());
2536        let docker = Arc::new(FakeDocker::new());
2537        let environment = HashMap::new();
2538
2539        init_devcontainer_manifest(
2540            cx,
2541            fs,
2542            http_client,
2543            docker,
2544            command_runner,
2545            environment,
2546            devcontainer_contents,
2547        )
2548        .await
2549    }
2550
2551    async fn init_devcontainer_manifest(
2552        cx: &mut TestAppContext,
2553        fs: Arc<FakeFs>,
2554        http_client: Arc<dyn HttpClient>,
2555        docker_client: Arc<FakeDocker>,
2556        command_runner: Arc<TestCommandRunner>,
2557        environment: HashMap<String, String>,
2558        devcontainer_contents: &str,
2559    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2560        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
2561        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
2562        let worktree_store =
2563            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
2564        let project_environment =
2565            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));
2566
2567        let context = DevContainerContext {
2568            project_directory: SanitizedPath::cast_arc(project_path),
2569            use_podman: false,
2570            fs: fs.clone(),
2571            http_client: http_client.clone(),
2572            environment: project_environment.downgrade(),
2573        };
2574
2575        let test_dependencies = TestDependencies {
2576            fs: fs.clone(),
2577            _http_client: http_client.clone(),
2578            docker: docker_client.clone(),
2579            command_runner: command_runner.clone(),
2580        };
2581        let manifest = DevContainerManifest::new(
2582            &context,
2583            environment,
2584            docker_client,
2585            command_runner,
2586            local_config,
2587            &PathBuf::from(TEST_PROJECT_PATH),
2588        )
2589        .await?;
2590
2591        Ok((test_dependencies, manifest))
2592    }
2593
2594    #[gpui::test]
2595    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
2596        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2597            cx,
2598            r#"
2599// These are some external comments. serde_lenient should handle them
2600{
2601    // These are some internal comments
2602    "image": "image",
2603    "remoteUser": "root",
2604}
2605            "#,
2606        )
2607        .await
2608        .unwrap();
2609
2610        let mut metadata = HashMap::new();
2611        metadata.insert(
2612            "remoteUser".to_string(),
2613            serde_json_lenient::Value::String("vsCode".to_string()),
2614        );
2615        let given_docker_config = DockerInspect {
2616            id: "docker_id".to_string(),
2617            config: DockerInspectConfig {
2618                labels: DockerConfigLabels {
2619                    metadata: Some(vec![metadata]),
2620                },
2621                image_user: None,
2622                env: Vec::new(),
2623            },
2624            mounts: None,
2625            state: None,
2626        };
2627
2628        let remote_user =
2629            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();
2630
2631        assert_eq!(remote_user, "root".to_string())
2632    }
2633
2634    #[gpui::test]
2635    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
2636        let (_, devcontainer_manifest) =
2637            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2638        let mut metadata = HashMap::new();
2639        metadata.insert(
2640            "remoteUser".to_string(),
2641            serde_json_lenient::Value::String("vsCode".to_string()),
2642        );
2643        let given_docker_config = DockerInspect {
2644            id: "docker_id".to_string(),
2645            config: DockerInspectConfig {
2646                labels: DockerConfigLabels {
2647                    metadata: Some(vec![metadata]),
2648                },
2649                image_user: None,
2650                env: Vec::new(),
2651            },
2652            mounts: None,
2653            state: None,
2654        };
2655
2656        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);
2657
2658        assert!(remote_user.is_ok());
2659        let remote_user = remote_user.expect("ok");
2660        assert_eq!(&remote_user, "vsCode")
2661    }
2662
2663    #[test]
2664    fn should_extract_feature_id_from_references() {
2665        assert_eq!(
2666            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2667            "aws-cli"
2668        );
2669        assert_eq!(
2670            extract_feature_id("ghcr.io/devcontainers/features/go"),
2671            "go"
2672        );
2673        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2674        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2675        assert_eq!(
2676            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2677            "rust"
2678        );
2679    }
2680
    #[gpui::test]
    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
        // Verifies the exact `docker run` argument list produced for a plain
        // image-based devcontainer: detached run, workspace bind mount,
        // identifying labels, and the keep-alive entrypoint script handed to
        // `/bin/sh -c`.
        // NOTE(review): `metadata` is built but never used below — looks like
        // leftover from a copied test; consider removing.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );

        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"{
                    "name": "TODO"
                }"#,
        )
        .await
        .unwrap();
        // Minimal build resources: a bare image with no metadata labels, no
        // extra mounts, and the standard long-running entrypoint script.
        let build_resources = DockerBuildResources {
            image: DockerInspect {
                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
                config: DockerInspectConfig {
                    labels: DockerConfigLabels { metadata: None },
                    image_user: None,
                    env: Vec::new(),
                },
                mounts: None,
                state: None,
            },
            additional_mounts: vec![],
            privileged: false,
            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
        };
        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);

        assert!(docker_run_command.is_ok());
        let docker_run_command = docker_run_command.expect("ok");

        assert_eq!(docker_run_command.get_program(), "docker");
        // The config-file label points at the devcontainer.json inside the
        // project, rendered with the platform's path separators.
        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("devcontainer.json");
        let expected_config_file_label = expected_config_file_label.display();
        assert_eq!(
            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
            vec![
                OsStr::new("run"),
                OsStr::new("--sig-proxy=false"),
                OsStr::new("-d"),
                OsStr::new("--mount"),
                OsStr::new(
                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
                ),
                OsStr::new("-l"),
                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
                OsStr::new("-l"),
                OsStr::new(&format!(
                    "devcontainer.config_file={expected_config_file_label}"
                )),
                OsStr::new("--entrypoint"),
                OsStr::new("/bin/sh"),
                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
                OsStr::new("-c"),
                OsStr::new(
                    "
    echo Container started
    trap \"exit 0\" 15
    exec \"$@\"
    while sleep 1 & wait $!; do :; done
                        "
                    .trim()
                ),
                OsStr::new("-"),
            ]
        )
    }
2755
2756    #[gpui::test]
2757    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
2758        // State where service not defined in dev container
2759        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2760        let given_docker_compose_config = DockerComposeResources {
2761            config: DockerComposeConfig {
2762                name: Some("devcontainers".to_string()),
2763                services: HashMap::new(),
2764                ..Default::default()
2765            },
2766            ..Default::default()
2767        };
2768
2769        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2770
2771        assert!(bad_result.is_err());
2772
2773        // State where service defined in devcontainer, not found in DockerCompose config
2774        let (_, given_dev_container) =
2775            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
2776                .await
2777                .unwrap();
2778        let given_docker_compose_config = DockerComposeResources {
2779            config: DockerComposeConfig {
2780                name: Some("devcontainers".to_string()),
2781                services: HashMap::new(),
2782                ..Default::default()
2783            },
2784            ..Default::default()
2785        };
2786
2787        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
2788
2789        assert!(bad_result.is_err());
2790        // State where service defined in devcontainer and in DockerCompose config
2791
2792        let (_, given_dev_container) =
2793            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
2794                .await
2795                .unwrap();
2796        let given_docker_compose_config = DockerComposeResources {
2797            config: DockerComposeConfig {
2798                name: Some("devcontainers".to_string()),
2799                services: HashMap::from([(
2800                    "found_service".to_string(),
2801                    DockerComposeService {
2802                        ..Default::default()
2803                    },
2804                )]),
2805                ..Default::default()
2806            },
2807            ..Default::default()
2808        };
2809
2810        let (service_name, _) =
2811            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();
2812
2813        assert_eq!(service_name, "found_service".to_string());
2814    }
2815
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        // Exercises ${...} variable substitution when no explicit
        // workspaceMount/workspaceFolder is configured, so the container
        // workspace defaults to /workspaces/<project-basename>.
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
                    "#;
        // The local environment supplied here backs the ${localEnv:...} lookups.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must leave the config in the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder} — default mount: /workspaces/<basename>
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2928
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        // Exercises ${...} substitution when workspaceMount/workspaceFolder are
        // set explicitly: container-side variables must resolve against the
        // custom /workspace/customfolder rather than the default mount.
        let given_devcontainer_contents = r#"
                // These are some external comments. serde_lenient should handle them
                {
                    // These are some internal comments
                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
                    "name": "myDevContainer-${devcontainerId}",
                    "remoteUser": "root",
                    "remoteEnv": {
                        "DEVCONTAINER_ID": "${devcontainerId}",
                        "MYVAR2": "myvarothervalue",
                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

                    },
                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
                    "workspaceFolder": "/workspace/customfolder"
                }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must leave the config in the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — basename of the explicit
        // workspaceFolder, not of the mount target.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename} — still the local project basename.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
3015
3016    // updateRemoteUserUID is treated as false in Windows, so this test will fail
3017    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
3018    #[cfg(not(target_os = "windows"))]
3019    #[gpui::test]
3020    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
3021        cx.executor().allow_parking();
3022        env_logger::try_init().ok();
3023        let given_devcontainer_contents = r#"
3024            /*---------------------------------------------------------------------------------------------
3025             *  Copyright (c) Microsoft Corporation. All rights reserved.
3026             *  Licensed under the MIT License. See License.txt in the project root for license information.
3027             *--------------------------------------------------------------------------------------------*/
3028            {
3029              "name": "cli-${devcontainerId}",
3030              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
3031              "build": {
3032                "dockerfile": "Dockerfile",
3033                "args": {
3034                  "VARIANT": "18-bookworm",
3035                  "FOO": "bar",
3036                },
3037              },
3038              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
3039              "workspaceFolder": "/workspace2",
3040              "mounts": [
3041                // Keep command history across instances
3042                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
3043              ],
3044
3045              "runArgs": [
3046                "--cap-add=SYS_PTRACE",
3047                "--sig-proxy=true",
3048              ],
3049
3050              "forwardPorts": [
3051                8082,
3052                8083,
3053              ],
3054              "appPort": [
3055                8084,
3056                "8085:8086",
3057              ],
3058
3059              "containerEnv": {
3060                "VARIABLE_VALUE": "value",
3061              },
3062
3063              "initializeCommand": "touch IAM.md",
3064
3065              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3066
3067              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3068
3069              "postCreateCommand": {
3070                "yarn": "yarn install",
3071                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3072              },
3073
3074              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3075
3076              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3077
3078              "remoteUser": "node",
3079
3080              "remoteEnv": {
3081                "PATH": "${containerEnv:PATH}:/some/other/path",
3082                "OTHER_ENV": "other_env_value"
3083              },
3084
3085              "features": {
3086                "ghcr.io/devcontainers/features/docker-in-docker:2": {
3087                  "moby": false,
3088                },
3089                "ghcr.io/devcontainers/features/go:1": {},
3090              },
3091
3092              "customizations": {
3093                "vscode": {
3094                  "extensions": [
3095                    "dbaeumer.vscode-eslint",
3096                    "GitHub.vscode-pull-request-github",
3097                  ],
3098                },
3099                "zed": {
3100                  "extensions": ["vue", "ruby"],
3101                },
3102                "codespaces": {
3103                  "repositories": {
3104                    "devcontainers/features": {
3105                      "permissions": {
3106                        "contents": "write",
3107                        "workflows": "write",
3108                      },
3109                    },
3110                  },
3111                },
3112              },
3113            }
3114            "#;
3115
3116        let (test_dependencies, mut devcontainer_manifest) =
3117            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3118                .await
3119                .unwrap();
3120
3121        test_dependencies
3122            .fs
3123            .atomic_write(
3124                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3125                r#"
3126#  Copyright (c) Microsoft Corporation. All rights reserved.
3127#  Licensed under the MIT License. See License.txt in the project root for license information.
3128ARG VARIANT="16-bullseye"
3129FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3130
3131RUN mkdir -p /workspaces && chown node:node /workspaces
3132
3133ARG USERNAME=node
3134USER $USERNAME
3135
3136# Save command line history
3137RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3138&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3139&& mkdir -p /home/$USERNAME/commandhistory \
3140&& touch /home/$USERNAME/commandhistory/.bash_history \
3141&& chown -R $USERNAME /home/$USERNAME/commandhistory
3142                    "#.trim().to_string(),
3143            )
3144            .await
3145            .unwrap();
3146
3147        devcontainer_manifest.parse_nonremote_vars().unwrap();
3148
3149        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3150
3151        assert_eq!(
3152            devcontainer_up.extension_ids,
3153            vec!["vue".to_string(), "ruby".to_string()]
3154        );
3155
3156        let files = test_dependencies.fs.files();
3157        let feature_dockerfile = files
3158            .iter()
3159            .find(|f| {
3160                f.file_name()
3161                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3162            })
3163            .expect("to be found");
3164        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3165        assert_eq!(
3166            &feature_dockerfile,
3167            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3168
3169#  Copyright (c) Microsoft Corporation. All rights reserved.
3170#  Licensed under the MIT License. See License.txt in the project root for license information.
3171ARG VARIANT="16-bullseye"
3172FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3173
3174RUN mkdir -p /workspaces && chown node:node /workspaces
3175
3176ARG USERNAME=node
3177USER $USERNAME
3178
3179# Save command line history
3180RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3181&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3182&& mkdir -p /home/$USERNAME/commandhistory \
3183&& touch /home/$USERNAME/commandhistory/.bash_history \
3184&& chown -R $USERNAME /home/$USERNAME/commandhistory
3185FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3186
3187FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3188USER root
3189COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3190RUN chmod -R 0755 /tmp/build-features/
3191
3192FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3193
3194USER root
3195
3196RUN mkdir -p /tmp/dev-container-features
3197COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3198
3199RUN \
3200echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3201echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3202
3203
3204RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3205cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3206&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3207&& cd /tmp/dev-container-features/docker-in-docker_0 \
3208&& chmod +x ./devcontainer-features-install.sh \
3209&& ./devcontainer-features-install.sh \
3210&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3211
3212RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3213cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3214&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3215&& cd /tmp/dev-container-features/go_1 \
3216&& chmod +x ./devcontainer-features-install.sh \
3217&& ./devcontainer-features-install.sh \
3218&& rm -rf /tmp/dev-container-features/go_1
3219
3220
3221ARG _DEV_CONTAINERS_IMAGE_USER=root
3222USER $_DEV_CONTAINERS_IMAGE_USER
3223"#
3224        );
3225
3226        let uid_dockerfile = files
3227            .iter()
3228            .find(|f| {
3229                f.file_name()
3230                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3231            })
3232            .expect("to be found");
3233        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3234
3235        assert_eq!(
3236            &uid_dockerfile,
3237            r#"ARG BASE_IMAGE
3238FROM $BASE_IMAGE
3239
3240USER root
3241
3242ARG REMOTE_USER
3243ARG NEW_UID
3244ARG NEW_GID
3245SHELL ["/bin/sh", "-c"]
3246RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3247	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3248	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3249	if [ -z "$OLD_UID" ]; then \
3250		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3251	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3252		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3253	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3254		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3255	else \
3256		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3257			FREE_GID=65532; \
3258			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3259			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3260			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3261		fi; \
3262		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3263		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3264		if [ "$OLD_GID" != "$NEW_GID" ]; then \
3265			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3266		fi; \
3267		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3268	fi;
3269
3270ARG IMAGE_USER
3271USER $IMAGE_USER
3272
3273# Ensure that /etc/profile does not clobber the existing path
3274RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3275
3276ENV DOCKER_BUILDKIT=1
3277
3278ENV GOPATH=/go
3279ENV GOROOT=/usr/local/go
3280ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3281ENV VARIABLE_VALUE=value
3282"#
3283        );
3284
3285        let golang_install_wrapper = files
3286            .iter()
3287            .find(|f| {
3288                f.file_name()
3289                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3290                    && f.to_str().is_some_and(|s| s.contains("/go_"))
3291            })
3292            .expect("to be found");
3293        let golang_install_wrapper = test_dependencies
3294            .fs
3295            .load(golang_install_wrapper)
3296            .await
3297            .unwrap();
3298        assert_eq!(
3299            &golang_install_wrapper,
3300            r#"#!/bin/sh
3301set -e
3302
3303on_exit () {
3304    [ $? -eq 0 ] && exit
3305    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3306}
3307
3308trap on_exit EXIT
3309
3310echo ===========================================================================
3311echo 'Feature       : go'
3312echo 'Id            : ghcr.io/devcontainers/features/go:1'
3313echo 'Options       :'
3314echo '    GOLANGCILINTVERSION=latest
3315    VERSION=latest'
3316echo ===========================================================================
3317
3318set -a
3319. ../devcontainer-features.builtin.env
3320. ./devcontainer-features.env
3321set +a
3322
3323chmod +x ./install.sh
3324./install.sh
3325"#
3326        );
3327
3328        let docker_commands = test_dependencies
3329            .command_runner
3330            .commands_by_program("docker");
3331
3332        let docker_run_command = docker_commands
3333            .iter()
3334            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3335            .expect("found");
3336
3337        assert_eq!(
3338            docker_run_command.args,
3339            vec![
3340                "run".to_string(),
3341                "--privileged".to_string(),
3342                "--cap-add=SYS_PTRACE".to_string(),
3343                "--sig-proxy=true".to_string(),
3344                "-d".to_string(),
3345                "--mount".to_string(),
3346                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3347                "--mount".to_string(),
3348                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3349                "--mount".to_string(),
3350                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3351                "-l".to_string(),
3352                "devcontainer.local_folder=/path/to/local/project".to_string(),
3353                "-l".to_string(),
3354                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3355                "-l".to_string(),
3356                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3357                "-p".to_string(),
3358                "8082:8082".to_string(),
3359                "-p".to_string(),
3360                "8083:8083".to_string(),
3361                "-p".to_string(),
3362                "8084:8084".to_string(),
3363                "-p".to_string(),
3364                "8085:8086".to_string(),
3365                "--entrypoint".to_string(),
3366                "/bin/sh".to_string(),
3367                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3368                "-c".to_string(),
3369                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3370                "-".to_string()
3371            ]
3372        );
3373
3374        let docker_exec_commands = test_dependencies
3375            .docker
3376            .exec_commands_recorded
3377            .lock()
3378            .unwrap();
3379
3380        assert!(docker_exec_commands.iter().all(|exec| {
3381            exec.env
3382                == HashMap::from([
3383                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3384                    (
3385                        "PATH".to_string(),
3386                        "/initial/path:/some/other/path".to_string(),
3387                    ),
3388                ])
3389        }))
3390    }
3391
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end test of the docker-compose path: a devcontainer.json that
        // references a compose file + service must produce the extended feature
        // Dockerfile, the updateUID Dockerfile, and the compose build/runtime
        // override files with the expected contents.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture: JSONC devcontainer config (trailing commas and // comments are
        // permitted by the lenient parser) selecting the "app" service from
        // docker-compose.yml and requesting two OCI features.
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose fixture the config points at: "app" builds from the
        // local Dockerfile and shares the "db" service's network namespace
        // (network_mode: service:db), so port publishing lands on "db".
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile the "app" service builds from; its contents must be
        // spliced verbatim into the generated Dockerfile.extended below.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        // Resolve ${localWorkspaceFolderBasename} and friends before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full build-and-run pipeline; the result itself is not
        // inspected here — the generated files and commands are.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Assert the generated Dockerfile.extended: user Dockerfile content first,
        // then the auto-added stage label and one feature-install stage per
        // requested feature (aws-cli_0, docker-in-docker_1), ending as root.
        // _REMOTE_USER_HOME is probed for 'vscode' — presumably derived from the
        // base image's metadata, since this config sets no remoteUser.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Assert the generated updateUID.Dockerfile: sed/eval script that remaps
        // the remote user's UID:GID to the host's, plus trailing ENV lines
        // contributed by the docker-in-docker feature (DOCKER_BUILDKIT=1).
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The compose *build* override must keep the original build context (".")
        // from docker-compose.yml rather than rewriting it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The compose *runtime* override: entrypoint/labels/dind volume go on
        // "app", while forwarded ports land on "db" because "app" uses
        // network_mode: service:db in the fixture above.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        // Keep-alive entrypoint so the container survives after start.
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        // Named volume added by docker-in-docker; suffix is a
                        // stable hash of the project (see safe_id/hash helpers).
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        // "forwardPorts": [8083, "db:5432", "db:1234"] — all three
                        // published on the db service (shared network namespace).
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        // Compare as parsed structs (not raw text) so key ordering in the
        // generated JSON does not matter.
        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3713
3714    #[gpui::test]
3715    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3716        cx: &mut TestAppContext,
3717    ) {
3718        cx.executor().allow_parking();
3719        env_logger::try_init().ok();
3720        let given_devcontainer_contents = r#"
3721        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3722        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3723        {
3724          "features": {
3725            "ghcr.io/devcontainers/features/aws-cli:1": {},
3726            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3727          },
3728          "name": "Rust and PostgreSQL",
3729          "dockerComposeFile": "docker-compose.yml",
3730          "service": "app",
3731          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3732
3733          // Features to add to the dev container. More info: https://containers.dev/features.
3734          // "features": {},
3735
3736          // Use 'forwardPorts' to make a list of ports inside the container available locally.
3737          "forwardPorts": [
3738            8083,
3739            "db:5432",
3740            "db:1234",
3741          ],
3742          "updateRemoteUserUID": false,
3743          "appPort": "8084",
3744
3745          // Use 'postCreateCommand' to run commands after the container is created.
3746          // "postCreateCommand": "rustc --version",
3747
3748          // Configure tool-specific properties.
3749          // "customizations": {},
3750
3751          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3752          // "remoteUser": "root"
3753        }
3754        "#;
3755        let (test_dependencies, mut devcontainer_manifest) =
3756            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3757                .await
3758                .unwrap();
3759
3760        test_dependencies
3761        .fs
3762        .atomic_write(
3763            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3764            r#"
3765version: '3.8'
3766
3767volumes:
3768postgres-data:
3769
3770services:
3771app:
3772    build:
3773        context: .
3774        dockerfile: Dockerfile
3775    env_file:
3776        # Ensure that the variables in .env match the same variables in devcontainer.json
3777        - .env
3778
3779    volumes:
3780        - ../..:/workspaces:cached
3781
3782    # Overrides default command so things don't shut down after the process ends.
3783    command: sleep infinity
3784
3785    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3786    network_mode: service:db
3787
3788    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3789    # (Adding the "ports" property to this file will not forward from a Codespace.)
3790
3791db:
3792    image: postgres:14.1
3793    restart: unless-stopped
3794    volumes:
3795        - postgres-data:/var/lib/postgresql/data
3796    env_file:
3797        # Ensure that the variables in .env match the same variables in devcontainer.json
3798        - .env
3799
3800    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3801    # (Adding the "ports" property to this file will not forward from a Codespace.)
3802                "#.trim().to_string(),
3803        )
3804        .await
3805        .unwrap();
3806
3807        test_dependencies.fs.atomic_write(
3808        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3809        r#"
3810FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3811
3812# Include lld linker to improve build times either by using environment variable
3813# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3814RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3815&& apt-get -y install clang lld \
3816&& apt-get autoremove -y && apt-get clean -y
3817        "#.trim().to_string()).await.unwrap();
3818
3819        devcontainer_manifest.parse_nonremote_vars().unwrap();
3820
3821        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3822
3823        let files = test_dependencies.fs.files();
3824        let feature_dockerfile = files
3825            .iter()
3826            .find(|f| {
3827                f.file_name()
3828                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3829            })
3830            .expect("to be found");
3831        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3832        assert_eq!(
3833            &feature_dockerfile,
3834            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3835
3836FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3837
3838# Include lld linker to improve build times either by using environment variable
3839# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3840RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3841&& apt-get -y install clang lld \
3842&& apt-get autoremove -y && apt-get clean -y
3843FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3844
3845FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3846USER root
3847COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3848RUN chmod -R 0755 /tmp/build-features/
3849
3850FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3851
3852USER root
3853
3854RUN mkdir -p /tmp/dev-container-features
3855COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3856
3857RUN \
3858echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3859echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3860
3861
3862RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3863cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3864&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3865&& cd /tmp/dev-container-features/aws-cli_0 \
3866&& chmod +x ./devcontainer-features-install.sh \
3867&& ./devcontainer-features-install.sh \
3868&& rm -rf /tmp/dev-container-features/aws-cli_0
3869
3870RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3871cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3872&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3873&& cd /tmp/dev-container-features/docker-in-docker_1 \
3874&& chmod +x ./devcontainer-features-install.sh \
3875&& ./devcontainer-features-install.sh \
3876&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3877
3878
3879ARG _DEV_CONTAINERS_IMAGE_USER=root
3880USER $_DEV_CONTAINERS_IMAGE_USER
3881
3882# Ensure that /etc/profile does not clobber the existing path
3883RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3884
3885
3886ENV DOCKER_BUILDKIT=1
3887"#
3888        );
3889    }
3890
    /// Docker Compose + OCI features when the container runtime is Podman.
    ///
    /// Same fixture shape as the compose test above, but the fake Docker
    /// client is switched into Podman mode via `FakeDocker::set_podman(true)`.
    /// Asserts that the generated "Dockerfile.extended" stages feature content
    /// through an extra `dev_containers_feature_content_source` stage and
    /// plain `COPY --from=...` layers instead of the BuildKit
    /// `RUN --mount=type=bind,...` form used in the Docker variant, and that
    /// an "updateUID.Dockerfile" is still produced.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json under test: compose-based config with two OCI
        // features; JSONC comments and trailing commas are exercised on purpose.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          // "forwardPorts": [5432],

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        // Build the manifest with a fake Docker client that reports Podman.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Compose fixture referenced by "dockerComposeFile" above.
        // NOTE(review): the YAML nesting looks flattened (e.g. `app:` sits at
        // the same column as `services:`) — confirm this indentation is
        // intentional for the parser exercised by these tests.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Dockerfile used by the compose `app` service's build section.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve local (non-remote) variable substitutions, then run the
        // full build-and-up flow against the fakes.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // The Podman path must emit COPY-based feature staging (no BuildKit
        // `RUN --mount` bind mounts) in the extended Dockerfile.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The UID-alignment Dockerfile is still generated on the Podman path.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4117
4118    #[gpui::test]
4119    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4120        cx.executor().allow_parking();
4121        env_logger::try_init().ok();
4122        let given_devcontainer_contents = r#"
4123            /*---------------------------------------------------------------------------------------------
4124             *  Copyright (c) Microsoft Corporation. All rights reserved.
4125             *  Licensed under the MIT License. See License.txt in the project root for license information.
4126             *--------------------------------------------------------------------------------------------*/
4127            {
4128              "name": "cli-${devcontainerId}",
4129              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4130              "build": {
4131                "dockerfile": "Dockerfile",
4132                "args": {
4133                  "VARIANT": "18-bookworm",
4134                  "FOO": "bar",
4135                },
4136                "target": "development",
4137              },
4138              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4139              "workspaceFolder": "/workspace2",
4140              "mounts": [
4141                // Keep command history across instances
4142                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4143              ],
4144
4145              "forwardPorts": [
4146                8082,
4147                8083,
4148              ],
4149              "appPort": "8084",
4150              "updateRemoteUserUID": false,
4151
4152              "containerEnv": {
4153                "VARIABLE_VALUE": "value",
4154              },
4155
4156              "initializeCommand": "touch IAM.md",
4157
4158              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4159
4160              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4161
4162              "postCreateCommand": {
4163                "yarn": "yarn install",
4164                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4165              },
4166
4167              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4168
4169              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4170
4171              "remoteUser": "node",
4172
4173              "remoteEnv": {
4174                "PATH": "${containerEnv:PATH}:/some/other/path",
4175                "OTHER_ENV": "other_env_value"
4176              },
4177
4178              "features": {
4179                "ghcr.io/devcontainers/features/docker-in-docker:2": {
4180                  "moby": false,
4181                },
4182                "ghcr.io/devcontainers/features/go:1": {},
4183              },
4184
4185              "customizations": {
4186                "vscode": {
4187                  "extensions": [
4188                    "dbaeumer.vscode-eslint",
4189                    "GitHub.vscode-pull-request-github",
4190                  ],
4191                },
4192                "zed": {
4193                  "extensions": ["vue", "ruby"],
4194                },
4195                "codespaces": {
4196                  "repositories": {
4197                    "devcontainers/features": {
4198                      "permissions": {
4199                        "contents": "write",
4200                        "workflows": "write",
4201                      },
4202                    },
4203                  },
4204                },
4205              },
4206            }
4207            "#;
4208
4209        let (test_dependencies, mut devcontainer_manifest) =
4210            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4211                .await
4212                .unwrap();
4213
4214        test_dependencies
4215            .fs
4216            .atomic_write(
4217                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4218                r#"
4219#  Copyright (c) Microsoft Corporation. All rights reserved.
4220#  Licensed under the MIT License. See License.txt in the project root for license information.
4221ARG VARIANT="16-bullseye"
4222FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4223FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4224
4225RUN mkdir -p /workspaces && chown node:node /workspaces
4226
4227ARG USERNAME=node
4228USER $USERNAME
4229
4230# Save command line history
4231RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4232&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4233&& mkdir -p /home/$USERNAME/commandhistory \
4234&& touch /home/$USERNAME/commandhistory/.bash_history \
4235&& chown -R $USERNAME /home/$USERNAME/commandhistory
4236                    "#.trim().to_string(),
4237            )
4238            .await
4239            .unwrap();
4240
4241        devcontainer_manifest.parse_nonremote_vars().unwrap();
4242
4243        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4244
4245        assert_eq!(
4246            devcontainer_up.extension_ids,
4247            vec!["vue".to_string(), "ruby".to_string()]
4248        );
4249
4250        let files = test_dependencies.fs.files();
4251        let feature_dockerfile = files
4252            .iter()
4253            .find(|f| {
4254                f.file_name()
4255                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4256            })
4257            .expect("to be found");
4258        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4259        assert_eq!(
4260            &feature_dockerfile,
4261            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4262
4263#  Copyright (c) Microsoft Corporation. All rights reserved.
4264#  Licensed under the MIT License. See License.txt in the project root for license information.
4265ARG VARIANT="16-bullseye"
4266FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4267FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4268
4269RUN mkdir -p /workspaces && chown node:node /workspaces
4270
4271ARG USERNAME=node
4272USER $USERNAME
4273
4274# Save command line history
4275RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4276&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4277&& mkdir -p /home/$USERNAME/commandhistory \
4278&& touch /home/$USERNAME/commandhistory/.bash_history \
4279&& chown -R $USERNAME /home/$USERNAME/commandhistory
4280FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4281
4282FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4283USER root
4284COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4285RUN chmod -R 0755 /tmp/build-features/
4286
4287FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4288
4289USER root
4290
4291RUN mkdir -p /tmp/dev-container-features
4292COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4293
4294RUN \
4295echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4296echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4297
4298
4299RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4300cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4301&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4302&& cd /tmp/dev-container-features/docker-in-docker_0 \
4303&& chmod +x ./devcontainer-features-install.sh \
4304&& ./devcontainer-features-install.sh \
4305&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4306
4307RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4308cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4309&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4310&& cd /tmp/dev-container-features/go_1 \
4311&& chmod +x ./devcontainer-features-install.sh \
4312&& ./devcontainer-features-install.sh \
4313&& rm -rf /tmp/dev-container-features/go_1
4314
4315
4316ARG _DEV_CONTAINERS_IMAGE_USER=root
4317USER $_DEV_CONTAINERS_IMAGE_USER
4318
4319# Ensure that /etc/profile does not clobber the existing path
4320RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4321
4322ENV DOCKER_BUILDKIT=1
4323
4324ENV GOPATH=/go
4325ENV GOROOT=/usr/local/go
4326ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4327ENV VARIABLE_VALUE=value
4328"#
4329        );
4330
4331        let golang_install_wrapper = files
4332            .iter()
4333            .find(|f| {
4334                f.file_name()
4335                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4336                    && f.to_str().is_some_and(|s| s.contains("go_"))
4337            })
4338            .expect("to be found");
4339        let golang_install_wrapper = test_dependencies
4340            .fs
4341            .load(golang_install_wrapper)
4342            .await
4343            .unwrap();
4344        assert_eq!(
4345            &golang_install_wrapper,
4346            r#"#!/bin/sh
4347set -e
4348
4349on_exit () {
4350    [ $? -eq 0 ] && exit
4351    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4352}
4353
4354trap on_exit EXIT
4355
4356echo ===========================================================================
4357echo 'Feature       : go'
4358echo 'Id            : ghcr.io/devcontainers/features/go:1'
4359echo 'Options       :'
4360echo '    GOLANGCILINTVERSION=latest
4361    VERSION=latest'
4362echo ===========================================================================
4363
4364set -a
4365. ../devcontainer-features.builtin.env
4366. ./devcontainer-features.env
4367set +a
4368
4369chmod +x ./install.sh
4370./install.sh
4371"#
4372        );
4373
4374        let docker_commands = test_dependencies
4375            .command_runner
4376            .commands_by_program("docker");
4377
4378        let docker_run_command = docker_commands
4379            .iter()
4380            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4381
4382        assert!(docker_run_command.is_some());
4383
4384        let docker_exec_commands = test_dependencies
4385            .docker
4386            .exec_commands_recorded
4387            .lock()
4388            .unwrap();
4389
4390        assert!(docker_exec_commands.iter().all(|exec| {
4391            exec.env
4392                == HashMap::from([
4393                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4394                    (
4395                        "PATH".to_string(),
4396                        "/initial/path:/some/other/path".to_string(),
4397                    ),
4398                ])
4399        }))
4400    }
4401
4402    #[cfg(not(target_os = "windows"))]
4403    #[gpui::test]
4404    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4405        cx.executor().allow_parking();
4406        env_logger::try_init().ok();
4407        let given_devcontainer_contents = r#"
4408            {
4409              "name": "cli-${devcontainerId}",
4410              "image": "test_image:latest",
4411            }
4412            "#;
4413
4414        let (test_dependencies, mut devcontainer_manifest) =
4415            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4416                .await
4417                .unwrap();
4418
4419        devcontainer_manifest.parse_nonremote_vars().unwrap();
4420
4421        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4422
4423        let files = test_dependencies.fs.files();
4424        let uid_dockerfile = files
4425            .iter()
4426            .find(|f| {
4427                f.file_name()
4428                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
4429            })
4430            .expect("to be found");
4431        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
4432
4433        assert_eq!(
4434            &uid_dockerfile,
4435            r#"ARG BASE_IMAGE
4436FROM $BASE_IMAGE
4437
4438USER root
4439
4440ARG REMOTE_USER
4441ARG NEW_UID
4442ARG NEW_GID
4443SHELL ["/bin/sh", "-c"]
4444RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
4445	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
4446	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
4447	if [ -z "$OLD_UID" ]; then \
4448		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
4449	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
4450		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
4451	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
4452		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
4453	else \
4454		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
4455			FREE_GID=65532; \
4456			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
4457			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
4458			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
4459		fi; \
4460		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4461		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4462		if [ "$OLD_GID" != "$NEW_GID" ]; then \
4463			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4464		fi; \
4465		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4466	fi;
4467
4468ARG IMAGE_USER
4469USER $IMAGE_USER
4470
4471# Ensure that /etc/profile does not clobber the existing path
4472RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4473"#
4474        );
4475    }
4476
    /// Docker Compose config whose `app` service uses a prebuilt image (no
    /// `build:` section). Verifies that the up flow still writes an
    /// "updateUID.Dockerfile" that rewrites the configured remote user's
    /// UID/GID via the REMOTE_USER/NEW_UID/NEW_GID build args.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "dockerComposeFile": "docker-compose-plain.yml",
              "service": "app",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture referenced by "dockerComposeFile" above: a single
        // image-only service, so no image build is involved.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
    app:
        image: test_image:latest
        command: sleep infinity
        volumes:
            - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Even with a plain compose image, the UID-alignment Dockerfile must
        // be generated.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4570
    /// Verifies that `image_from_dockerfile` resolves the base image from a
    /// Dockerfile whose final `FROM` uses chained `ARG` expansions, and that a
    /// build arg from devcontainer.json (`VERSION=1.22`) overrides the
    /// Dockerfile's own `ARG VERSION=1.21` default.
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json supplying a build arg that should win over the
        // Dockerfile's ARG default below.
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                }
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile with a decoy first stage (`dontgrabme`); the base image is
        // expected to come from the final stage's expanded `FROM ${IMAGE}`.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}

FROM ${IMAGE} AS devcontainer
                    "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        // No `build.target` in this devcontainer.json, so `None` is passed as
        // the target here.
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        // 1.22 (JSON build arg) rather than 1.21 (Dockerfile default).
        assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
    }
4630
    /// Like `test_gets_base_image_from_dockerfile`, but with `build.target`
    /// set: the base image must be taken from the named stage (`development`,
    /// which uses the `:latest` DEV_IMAGE) rather than another stage.
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                },
                "target": "development"
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Two candidate stages: `development` (DEV_IMAGE, tagged :latest) and
        // `production` (IMAGE, versioned). The configured target selects the
        // former.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest

FROM ${DEV_IMAGE} AS development
FROM ${IMAGE} AS production
                    "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        // `build.target` ("development") is forwarded to the image lookup.
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
    }
4693
    /// Exercises `expanded_dockerfile_content`:
    /// - `ARG` defaults are substituted into later `ARG` lines and into `FROM`;
    /// - devcontainer.json build args (`ELIXIR_VERSION=1.21`, `JSON_ARG`)
    ///   override Dockerfile defaults;
    /// - a forward reference (`${OTP_VERSION}` used before its `ARG`) is left
    ///   unexpanded;
    /// - values containing quotes/braces (NESTED_MAP/WRAPPING_MAP) survive
    ///   expansion intact.
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                    "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Note IMAGE expands with 1.21 (from devcontainer.json), not the
        // Dockerfile's 1.20.0-rc.4 default, and INVALID_FORWARD_REFERENCE
        // keeps its literal `${OTP_VERSION}`.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
            "#
            .trim()
        )
    }
4766
    // TODO(review): empty placeholder — passes vacuously; implement or remove.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4769
    // TODO(review): empty placeholder — passes vacuously; implement or remove.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4772
    // TODO(review): empty placeholder — passes vacuously; implement or remove.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4775
    /// Snapshot of a single `run_docker_exec` call captured by `FakeDocker`.
    pub(crate) struct RecordedExecCommand {
        // Underscore-prefixed fields are stored for completeness; the prefix
        // suggests current assertions don't read them (only `env` is
        // un-prefixed) — confirm against the tests before relying on this.
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment map passed to the exec call; cloned at record time.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4783
    /// Test double for `DockerClient`: serves canned responses and records
    /// exec invocations instead of talking to a real daemon.
    pub(crate) struct FakeDocker {
        // Every `run_docker_exec` call is appended here for later assertions.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, `docker_cli()` reports "podman" and
        // `supports_compose_buildkit()` returns false.
        podman: bool,
    }
4788
4789    impl FakeDocker {
4790        pub(crate) fn new() -> Self {
4791            Self {
4792                podman: false,
4793                exec_commands_recorded: Mutex::new(Vec::new()),
4794            }
4795        }
4796        #[cfg(not(target_os = "windows"))]
4797        fn set_podman(&mut self, podman: bool) {
4798            self.podman = podman;
4799        }
4800    }
4801
    // Canned DockerClient: `inspect` and `get_docker_compose_config` answer
    // from hard-coded fixtures keyed on well-known ids/paths, while
    // `run_docker_exec` only records its arguments for later assertions.
    #[async_trait]
    impl DockerClient for FakeDocker {
        // Returns a fixed inspect payload per known image name or
        // container-id prefix; any other id yields `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // typescript-node base image: remoteUser=node, no env/mounts.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // rust base image: remoteUser=vscode.
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Containers whose id starts with "cli_": carries a PATH env entry.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Matches the id returned by `find_process_by_filters` below; the
            // only branch that reports mounts (a project bind mount).
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Containers whose id starts with "rust_a-": remoteUser=vscode.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Image referenced by the docker-compose-plain.yml fixture below.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            Err(DevContainerError::DockerNotAvailable)
        }
        // Serves one of two fixed compose configs depending on which single
        // compose file path is requested; anything else is an error.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            // docker-compose.yml: two services (built `app` sharing `db`'s
            // network, and a postgres `db` with a named volume).
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                    target: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // docker-compose-plain.yml: a single image-based `app` service.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        // Compose builds always "succeed" in the fake.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        // Records the exec invocation instead of running it, then succeeds.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        // Starting containers is unsupported by the fake.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        // Always "finds" the same container, regardless of the filters; its id
        // is handled specially by `inspect` above.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        // BuildKit is reported as supported only when not emulating podman.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        // CLI name tracks the podman flag.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
5042
    /// A single program invocation captured by `TestCommandRunner`.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        // Executable name as recorded from the command's program.
        pub(crate) program: String,
        // Stringified arguments in invocation order.
        pub(crate) args: Vec<String>,
    }
5048
    /// `CommandRunner` test double that records commands instead of spawning
    /// processes.
    pub(crate) struct TestCommandRunner {
        // Chronological log of every command passed to `run_command`.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5052
5053    impl TestCommandRunner {
5054        fn new() -> Self {
5055            Self {
5056                commands_recorded: Mutex::new(Vec::new()),
5057            }
5058        }
5059
5060        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5061            let record = self.commands_recorded.lock().expect("poisoned");
5062            record
5063                .iter()
5064                .filter(|r| r.program == program)
5065                .map(|r| r.clone())
5066                .collect()
5067        }
5068    }
5069
5070    #[async_trait]
5071    impl CommandRunner for TestCommandRunner {
5072        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5073            let mut record = self.commands_recorded.lock().expect("poisoned");
5074
5075            record.push(TestCommand {
5076                program: command.get_program().display().to_string(),
5077                args: command
5078                    .get_args()
5079                    .map(|a| a.display().to_string())
5080                    .collect(),
5081            });
5082
5083            Ok(Output {
5084                status: ExitStatus::default(),
5085                stdout: vec![],
5086                stderr: vec![],
5087            })
5088        }
5089    }
5090
5091    fn fake_http_client() -> Arc<dyn HttpClient> {
5092        FakeHttpClient::create(|request| async move {
5093            let (parts, _body) = request.into_parts();
5094            if parts.uri.path() == "/token" {
5095                let token_response = TokenResponse {
5096                    token: "token".to_string(),
5097                };
5098                return Ok(http::Response::builder()
5099                    .status(200)
5100                    .body(http_client::AsyncBody::from(
5101                        serde_json_lenient::to_string(&token_response).unwrap(),
5102                    ))
5103                    .unwrap());
5104            }
5105
5106            // OCI specific things
5107            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5108                let response = r#"
5109                    {
5110                        "schemaVersion": 2,
5111                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
5112                        "config": {
5113                            "mediaType": "application/vnd.devcontainers",
5114                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5115                            "size": 2
5116                        },
5117                        "layers": [
5118                            {
5119                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5120                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5121                                "size": 59392,
5122                                "annotations": {
5123                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5124                                }
5125                            }
5126                        ],
5127                        "annotations": {
5128                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5129                            "com.github.package.type": "devcontainer_feature"
5130                        }
5131                    }
5132                    "#;
5133                return Ok(http::Response::builder()
5134                    .status(200)
5135                    .body(http_client::AsyncBody::from(response))
5136                    .unwrap());
5137            }
5138
5139            if parts.uri.path()
5140                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5141            {
5142                let response = build_tarball(vec![
5143                    ("./NOTES.md", r#"
5144                        ## Limitations
5145
5146                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5147                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5148                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5149                          ```
5150                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5151                          ```
5152                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5153
5154
5155                        ## OS Support
5156
5157                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5158
5159                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5160
5161                        `bash` is required to execute the `install.sh` script."#),
5162                    ("./README.md", r#"
5163                        # Docker (Docker-in-Docker) (docker-in-docker)
5164
5165                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5166
5167                        ## Example Usage
5168
5169                        ```json
5170                        "features": {
5171                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5172                        }
5173                        ```
5174
5175                        ## Options
5176
5177                        | Options Id | Description | Type | Default Value |
5178                        |-----|-----|-----|-----|
5179                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5180                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5181                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5182                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5183                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5184                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5185                        | installDockerBuildx | Install Docker Buildx | boolean | true |
5186                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5187                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5188
5189                        ## Customizations
5190
5191                        ### VS Code Extensions
5192
5193                        - `ms-azuretools.vscode-containers`
5194
5195                        ## Limitations
5196
5197                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5198                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5199                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5200                          ```
5201                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5202                          ```
5203                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5204
5205
5206                        ## OS Support
5207
5208                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5209
5210                        `bash` is required to execute the `install.sh` script.
5211
5212
5213                        ---
5214
5215                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
5216                    ("./devcontainer-feature.json", r#"
5217                        {
5218                          "id": "docker-in-docker",
5219                          "version": "2.16.1",
5220                          "name": "Docker (Docker-in-Docker)",
5221                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5222                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5223                          "options": {
5224                            "version": {
5225                              "type": "string",
5226                              "proposals": [
5227                                "latest",
5228                                "none",
5229                                "20.10"
5230                              ],
5231                              "default": "latest",
5232                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5233                            },
5234                            "moby": {
5235                              "type": "boolean",
5236                              "default": true,
5237                              "description": "Install OSS Moby build instead of Docker CE"
5238                            },
5239                            "mobyBuildxVersion": {
5240                              "type": "string",
5241                              "default": "latest",
5242                              "description": "Install a specific version of moby-buildx when using Moby"
5243                            },
5244                            "dockerDashComposeVersion": {
5245                              "type": "string",
5246                              "enum": [
5247                                "none",
5248                                "v1",
5249                                "v2"
5250                              ],
5251                              "default": "v2",
5252                              "description": "Default version of Docker Compose (v1, v2 or none)"
5253                            },
5254                            "azureDnsAutoDetection": {
5255                              "type": "boolean",
5256                              "default": true,
5257                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5258                            },
5259                            "dockerDefaultAddressPool": {
5260                              "type": "string",
5261                              "default": "",
5262                              "proposals": [],
5263                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5264                            },
5265                            "installDockerBuildx": {
5266                              "type": "boolean",
5267                              "default": true,
5268                              "description": "Install Docker Buildx"
5269                            },
5270                            "installDockerComposeSwitch": {
5271                              "type": "boolean",
5272                              "default": false,
5273                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5274                            },
5275                            "disableIp6tables": {
5276                              "type": "boolean",
5277                              "default": false,
5278                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5279                            }
5280                          },
5281                          "entrypoint": "/usr/local/share/docker-init.sh",
5282                          "privileged": true,
5283                          "containerEnv": {
5284                            "DOCKER_BUILDKIT": "1"
5285                          },
5286                          "customizations": {
5287                            "vscode": {
5288                              "extensions": [
5289                                "ms-azuretools.vscode-containers"
5290                              ],
5291                              "settings": {
5292                                "github.copilot.chat.codeGeneration.instructions": [
5293                                  {
5294                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5295                                  }
5296                                ]
5297                              }
5298                            }
5299                          },
5300                          "mounts": [
5301                            {
5302                              "source": "dind-var-lib-docker-${devcontainerId}",
5303                              "target": "/var/lib/docker",
5304                              "type": "volume"
5305                            }
5306                          ],
5307                          "installsAfter": [
5308                            "ghcr.io/devcontainers/features/common-utils"
5309                          ]
5310                        }"#),
5311                    ("./install.sh", r#"
5312                    #!/usr/bin/env bash
5313                    #-------------------------------------------------------------------------------------------------------------
5314                    # Copyright (c) Microsoft Corporation. All rights reserved.
5315                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5316                    #-------------------------------------------------------------------------------------------------------------
5317                    #
5318                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5319                    # Maintainer: The Dev Container spec maintainers
5320
5321
5322                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5323                    USE_MOBY="${MOBY:-"true"}"
5324                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5325                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5326                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5327                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5328                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5329                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5330                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5331                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5332                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5333                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5334                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5335                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5336
5337                    # Default: Exit on any failure.
5338                    set -e
5339
5340                    # Clean up
5341                    rm -rf /var/lib/apt/lists/*
5342
5343                    # Setup STDERR.
5344                    err() {
5345                        echo "(!) $*" >&2
5346                    }
5347
5348                    if [ "$(id -u)" -ne 0 ]; then
5349                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5350                        exit 1
5351                    fi
5352
5353                    ###################
5354                    # Helper Functions
5355                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5356                    ###################
5357
5358                    # Determine the appropriate non-root user
5359                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5360                        USERNAME=""
5361                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5362                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5363                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5364                                USERNAME=${CURRENT_USER}
5365                                break
5366                            fi
5367                        done
5368                        if [ "${USERNAME}" = "" ]; then
5369                            USERNAME=root
5370                        fi
5371                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5372                        USERNAME=root
5373                    fi
5374
5375                    # Package manager update function
5376                    pkg_mgr_update() {
5377                        case ${ADJUSTED_ID} in
5378                            debian)
5379                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5380                                    echo "Running apt-get update..."
5381                                    apt-get update -y
5382                                fi
5383                                ;;
5384                            rhel)
5385                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5386                                    cache_check_dir="/var/cache/yum"
5387                                else
5388                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5389                                fi
5390                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5391                                    echo "Running ${PKG_MGR_CMD} makecache ..."
5392                                    ${PKG_MGR_CMD} makecache
5393                                fi
5394                                ;;
5395                        esac
5396                    }
5397
5398                    # Checks if packages are installed and installs them if not
5399                    check_packages() {
5400                        case ${ADJUSTED_ID} in
5401                            debian)
5402                                if ! dpkg -s "$@" > /dev/null 2>&1; then
5403                                    pkg_mgr_update
5404                                    apt-get -y install --no-install-recommends "$@"
5405                                fi
5406                                ;;
5407                            rhel)
5408                                if ! rpm -q "$@" > /dev/null 2>&1; then
5409                                    pkg_mgr_update
5410                                    ${PKG_MGR_CMD} -y install "$@"
5411                                fi
5412                                ;;
5413                        esac
5414                    }
5415
5416                    # Figure out correct version of a three part version number is not passed
5417                    find_version_from_git_tags() {
5418                        local variable_name=$1
5419                        local requested_version=${!variable_name}
5420                        if [ "${requested_version}" = "none" ]; then return; fi
5421                        local repository=$2
5422                        local prefix=${3:-"tags/v"}
5423                        local separator=${4:-"."}
5424                        local last_part_optional=${5:-"false"}
5425                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5426                            local escaped_separator=${separator//./\\.}
5427                            local last_part
5428                            if [ "${last_part_optional}" = "true" ]; then
5429                                last_part="(${escaped_separator}[0-9]+)?"
5430                            else
5431                                last_part="${escaped_separator}[0-9]+"
5432                            fi
5433                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5434                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5435                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5436                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5437                            else
5438                                set +e
5439                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5440                                set -e
5441                            fi
5442                        fi
5443                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5444                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5445                            exit 1
5446                        fi
5447                        echo "${variable_name}=${!variable_name}"
5448                    }
5449
5450                    # Use semver logic to decrement a version number then look for the closest match
5451                    find_prev_version_from_git_tags() {
5452                        local variable_name=$1
5453                        local current_version=${!variable_name}
5454                        local repository=$2
5455                        # Normally a "v" is used before the version number, but support alternate cases
5456                        local prefix=${3:-"tags/v"}
5457                        # Some repositories use "_" instead of "." for version number part separation, support that
5458                        local separator=${4:-"."}
5459                        # Some tools release versions that omit the last digit (e.g. go)
5460                        local last_part_optional=${5:-"false"}
5461                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5462                        local version_suffix_regex=$6
5463                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5464                        set +e
5465                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5466                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5467                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5468
5469                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5470                                ((major=major-1))
5471                                declare -g ${variable_name}="${major}"
5472                                # Look for latest version from previous major release
5473                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5474                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
5475                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5476                                ((minor=minor-1))
5477                                declare -g ${variable_name}="${major}.${minor}"
5478                                # Look for latest version from previous minor release
5479                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5480                            else
5481                                ((breakfix=breakfix-1))
5482                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5483                                    declare -g ${variable_name}="${major}.${minor}"
5484                                else
5485                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5486                                fi
5487                            fi
5488                        set -e
5489                    }
5490
5491                    # Function to fetch the version released prior to the latest version
5492                    get_previous_version() {
5493                        local url=$1
5494                        local repo_url=$2
5495                        local variable_name=$3
5496                        prev_version=${!variable_name}
5497
5498                        output=$(curl -s "$repo_url");
5499                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
5500                          message=$(echo "$output" | jq -r '.message')
5501
5502                          if [[ $message == "API rate limit exceeded"* ]]; then
5503                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
5504                                echo -e "\nAttempting to find latest version using GitHub tags."
5505                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
5506                                declare -g ${variable_name}="${prev_version}"
5507                           fi
5508                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
5509                            echo -e "\nAttempting to find latest version using GitHub Api."
5510                            version=$(echo "$output" | jq -r '.[1].tag_name')
5511                            declare -g ${variable_name}="${version#v}"
5512                        fi
5513                        echo "${variable_name}=${!variable_name}"
5514                    }
5515
5516                    get_github_api_repo_url() {
5517                        local url=$1
5518                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5519                    }
5520
5521                    ###########################################
5522                    # Start docker-in-docker installation
5523                    ###########################################
5524
5525                    # Ensure apt is in non-interactive to avoid prompts
5526                    export DEBIAN_FRONTEND=noninteractive
5527
5528                    # Source /etc/os-release to get OS info
5529                    . /etc/os-release
5530
5531                    # Determine adjusted ID and package manager
5532                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5533                        ADJUSTED_ID="debian"
5534                        PKG_MGR_CMD="apt-get"
5535                        # Use dpkg for Debian-based systems
5536                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5537                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5538                        ADJUSTED_ID="rhel"
5539                        # Determine the appropriate package manager for RHEL-based systems
5540                        for pkg_mgr in tdnf dnf microdnf yum; do
5541                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
5542                                PKG_MGR_CMD="$pkg_mgr"
5543                                break
5544                            fi
5545                        done
5546
5547                        if [ -z "${PKG_MGR_CMD}" ]; then
5548                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5549                            exit 1
5550                        fi
5551
5552                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5553                    else
5554                        err "Linux distro ${ID} not supported."
5555                        exit 1
5556                    fi
5557
5558                    # Azure Linux specific setup
5559                    if [ "${ID}" = "azurelinux" ]; then
5560                        VERSION_CODENAME="azurelinux${VERSION_ID}"
5561                    fi
5562
5563                    # Prevent attempting to install Moby on Debian trixie (packages removed)
5564                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5565                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5566                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5567                        exit 1
5568                    fi
5569
5570                    # Check if distro is supported
5571                    if [ "${USE_MOBY}" = "true" ]; then
5572                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5573                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5574                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5575                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5576                                exit 1
5577                            fi
5578                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
5579                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5580                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5581                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5582                            else
5583                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5584                            fi
5585                        fi
5586                    else
5587                        if [ "${ADJUSTED_ID}" = "debian" ]; then
5588                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5589                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5590                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5591                                exit 1
5592                            fi
5593                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5594                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5595
5596                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5597                        fi
5598                    fi
5599
5600                    # Install base dependencies
5601                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5602                    case ${ADJUSTED_ID} in
5603                        debian)
5604                            check_packages apt-transport-https $base_packages dirmngr
5605                            ;;
5606                        rhel)
5607                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel
5608
5609                            ;;
5610                    esac
5611
5612                    # Install git if not already present
5613                    if ! command -v git >/dev/null 2>&1; then
5614                        check_packages git
5615                    fi
5616
5617                    # Update CA certificates to ensure HTTPS connections work properly
5618                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5619                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5620                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5621                        update-ca-certificates
5622                    fi
5623
5624                    # Swap to legacy iptables for compatibility (Debian only)
5625                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5626                        update-alternatives --set iptables /usr/sbin/iptables-legacy
5627                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5628                    fi
5629
5630                    # Set up the necessary repositories
5631                    if [ "${USE_MOBY}" = "true" ]; then
5632                        # Name of open source engine/cli
5633                        engine_package_name="moby-engine"
5634                        cli_package_name="moby-cli"
5635
5636                        case ${ADJUSTED_ID} in
5637                            debian)
5638                                # Import key safely and import Microsoft apt repo
5639                                {
5640                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5641                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5642                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5643                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5644                                ;;
5645                            rhel)
5646                                echo "(*) ${ID} detected - checking for Moby packages..."
5647
5648                                # Check if moby packages are available in default repos
5649                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5650                                    echo "(*) Using built-in ${ID} Moby packages"
5651                                else
5652                                    case "${ID}" in
5653                                        azurelinux)
5654                                            echo "(*) Moby packages not found in Azure Linux repositories"
5655                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5656                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5657                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5658                                            exit 1
5659                                            ;;
5660                                        mariner)
5661                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
5662                                            # Add Microsoft repository if packages aren't available locally
5663                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5664                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
5665                    [microsoft]
5666                    name=Microsoft Repository
5667                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5668                    enabled=1
5669                    gpgcheck=1
5670                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5671                    EOF
5672                                    # Verify packages are available after adding repo
5673                                    pkg_mgr_update
5674                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5675                                        echo "(*) Moby packages not found in Microsoft repository either"
5676                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5677                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5678                                        exit 1
5679                                    fi
5680                                    ;;
5681                                *)
5682                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5683                                    exit 1
5684                                    ;;
5685                                esac
5686                            fi
5687                            ;;
5688                        esac
5689                    else
5690                        # Name of licensed engine/cli
5691                        engine_package_name="docker-ce"
5692                        cli_package_name="docker-ce-cli"
# Register the upstream Docker CE ("licensed") package repositories for the
# detected distro family. Only reached when Moby packages were not selected.
case ${ADJUSTED_ID} in
    debian)
        # Trust Docker's signing key and register the stable apt repo for this release.
        curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
        echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
        ;;
    rhel)
        # Docker CE repository setup for RHEL-based systems
        setup_docker_ce_repo() {
            # Import Docker's RPM signing key and write the yum repo definition.
            # NOTE(review): baseurl is pinned to the EL9 repo regardless of the
            # host release — confirm this is intended for all RHEL-family hosts.
            curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
            cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
        }
        install_azure_linux_deps() {
            # Best-effort install of libraries Docker CE needs on Azure Linux /
            # Mariner; failures are reported but never abort the script.
            echo "(*) Installing device-mapper libraries for Docker CE..."
            [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
            echo "(*) Installing additional Docker CE dependencies..."
            ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                echo "(*) Some optional dependencies could not be installed, continuing..."
            }
        }
        setup_selinux_context() {
            # When SELinux is not disabled, register a minimal file context for
            # /var/lib/docker so Docker can operate without container-selinux.
            if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                echo "(*) Creating minimal SELinux context for Docker compatibility..."
                mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
            fi
        }

        # Special handling for RHEL Docker CE installation
        case "${ID}" in
            azurelinux|mariner)
                echo "(*) ${ID} detected"
                echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                echo "(*) Setting up Docker CE repository..."

                setup_docker_ce_repo
                install_azure_linux_deps

                if [ "${USE_MOBY}" != "true" ]; then
                    echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                    echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                    setup_selinux_context
                else
                    echo "(*) Using Moby - container-selinux not required"
                fi
                ;;
            *)
                # Standard RHEL/CentOS/Fedora approach
                if command -v dnf >/dev/null 2>&1; then
                    dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                elif command -v yum-config-manager >/dev/null 2>&1; then
                    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                else
                    # Manual fallback
                    setup_docker_ce_repo
                fi
                ;;
        esac
        ;;
esac
5761                    fi
5762
# Make the freshly added repositories visible to the package manager.
if [ "${ADJUSTED_ID}" = "debian" ]; then
    apt-get update
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    pkg_mgr_update
fi
5772
# Soft version matching: resolve the requested DOCKER_VERSION into the
# package-manager version suffixes (engine_version_suffix / cli_version_suffix)
# that are appended to the package names at install time.
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
                # Second madison column is the candidate version string.
                cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            # A bare "=" means grep matched nothing — treat as "version not found".
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
                if [ "${USE_MOBY}" = "true" ]; then
                    available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                else
                    available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                fi
            set -e
            if [ -n "${available_versions}" ]; then
                # RPM packages take a "-<version>" suffix rather than apt's "=<version>".
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi
5817
# Version matching for moby-buildx: resolve MOBY_BUILDX_VERSION into a
# package-manager suffix. Only relevant when installing Moby packages.
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Escape '.' and '+' so the requested version is matched literally.
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                    buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                # A bare "=" means no candidate matched the requested version.
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
5855
# Install Docker / Moby CLI and Engine if not already present.
# Fixes in this revision:
#   * RHEL branches paired docker-ce with ${cli_version_suffix} and
#     docker-ce-cli with ${engine_version_suffix} (swapped); each package now
#     uses its matching suffix.
#   * docker_ce_version / docker_cli_version were read from the swapped
#     suffix variables in the Azure Linux manual-download path.
#   * Removed a duplicated "Standard installation failed" echo.
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                    apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                    exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Hold the pinned packages so routine upgrades don't move them.
                apt-mark hold docker-ce docker-ce-cli
                # Install compose
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                    ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                    exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    # Use rpm with --force and --nodeps for Azure Linux
                    set +e  # Don't exit on error for this section
                    # FIX: docker-ce takes the engine suffix, docker-ce-cli the CLI suffix.
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Get the repository baseurl
                        repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Get package names with versions, stripping the leading '-'
                        # of the RPM-style suffix resolved earlier.
                        # FIX: read each version from its matching suffix variable.
                        if [ -n "${engine_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                            docker_cli_version="${cli_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems
                    # (same suffix pairing fix as above)
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
5986
echo "Finished installing docker / moby!"

# Docker's CLI discovers plugins (compose, buildx, ...) under this directory.
docker_home="/usr/libexec/docker"
cli_plugins_dir="${docker_home}/cli-plugins"

# fallback for docker-compose: retry with the previous released version when
# the latest release asset cannot be downloaded.
fallback_compose(){
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    # get_previous_version updates compose_version in place (passed by name).
    get_previous_version "${url}" "${repo_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    # Relies on target_compose_arch and docker_compose_path being set by the caller.
    curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
}
6001
# If 'docker-compose' command is to be included
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the package architecture onto compose release-asset naming.
    case "${architecture}" in
    amd64|x86_64) target_compose_arch=x86_64 ;;
    arm64|aarch64) target_compose_arch=aarch64 ;;
    *)
        echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
        exit 1
        # (';;' is optional on the final case branch)
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum and verify the binary against it.
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        # v2 path: resolve the requested tag against git tags, then download the
        # static binary; fallback_compose retries with the previous release.
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
                 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
                 fallback_compose "$docker_compose_url"
        }

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum and verify the binary against it.
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a CLI plugin so 'docker compose' works.
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6057
# fallback method for compose-switch: retry with the previous released version
# when the latest release asset cannot be downloaded.
# NOTE(review): a '-' in a function name is a bash extension, not POSIX sh.
fallback_compose-switch() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    # get_previous_version updates compose_switch_version in place (passed by name).
    get_previous_version "$url" "$repo_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    # Relies on target_switch_arch being set by the caller before invocation.
    curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
}
# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
# FIX: the architecture mapping is hoisted above the version lookup, because the
# fallback path invoked there interpolates ${target_switch_arch} into the
# download URL; previously that variable was still unset at that point.
if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
    if type docker-compose > /dev/null 2>&1; then
        echo "(*) Installing compose-switch..."
        current_compose_path="$(command -v docker-compose)"
        target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
        compose_switch_version="latest"
        compose_switch_url="https://github.com/docker/compose-switch"

        # Map architecture for compose-switch downloads (must precede any
        # fallback_compose-switch call below).
        case "${architecture}" in
            amd64|x86_64) target_switch_arch=amd64 ;;
            arm64|aarch64) target_switch_arch=arm64 ;;
            *) target_switch_arch=${architecture} ;;
        esac

        # Try to get latest version, fallback to known stable version if GitHub API fails
        set +e
        find_version_from_git_tags compose_switch_version "$compose_switch_url"
        if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
            echo "(*) GitHub API rate limited or failed, using fallback method"
            fallback_compose-switch "$compose_switch_url"
        fi
        set -e

        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
        chmod +x /usr/local/bin/compose-switch
        # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
        # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
        mv "${current_compose_path}" "${target_compose_path}"
        # NOTE(review): ${docker_compose_path} is only assigned when
        # DOCKER_DASH_COMPOSE_VERSION != "none" — confirm this branch cannot be
        # reached with compose disabled but a preinstalled docker-compose.
        update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
        update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
    else
        err "Skipping installation of compose-switch as docker compose is unavailable..."
    fi
fi
6101
# If init file already exists, exit
if [ -f "/usr/local/share/docker-init.sh" ]; then
    echo "/usr/local/share/docker-init.sh already exists, so exiting."
    # Clean up
    rm -rf /var/lib/apt/lists/*
    exit 0
fi
echo "docker-init doesn't exist, adding..."

# Ensure the 'docker' group exists so daemon-socket access can be delegated.
# (grep reads /etc/group directly — no need to pipe through cat.)
if ! grep -e "^docker:" /etc/group > /dev/null 2>&1; then
        groupadd -r docker
fi

# Let the non-root user talk to the Docker daemon socket.
usermod -aG docker "${USERNAME}"
6116
# fallback for docker/buildx: retry with the previous released version when the
# latest release asset cannot be downloaded.
fallback_buildx() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
    # get_previous_version updates buildx_version in place (passed by name).
    get_previous_version "$url" "$repo_url" buildx_version
    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
    echo -e "\nAttempting to install v${buildx_version}"
    # Downloads into the current working directory (caller cd's to /tmp first).
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
}
6127
# Install the buildx CLI plugin when requested.
if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
    # Resolve "latest" (or a pinned version) against the buildx git tags.
    buildx_version="latest"
    docker_buildx_url="https://github.com/docker/buildx"
    find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
    echo "(*) Installing buildx ${buildx_version}..."

      # Map architecture for buildx downloads
    case "${architecture}" in
        amd64|x86_64) target_buildx_arch=amd64 ;;
        arm64|aarch64) target_buildx_arch=arm64 ;;
        *) target_buildx_arch=${architecture} ;;
    esac

    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

    cd /tmp
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

    # (re-assigned defensively; the same values are set earlier in the script)
    docker_home="/usr/libexec/docker"
    cli_plugins_dir="${docker_home}/cli-plugins"

    # Install the binary as a CLI plugin and hand ownership to the docker group.
    mkdir -p ${cli_plugins_dir}
    mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
    chmod +x ${cli_plugins_dir}/docker-buildx

    chown -R "${USERNAME}:docker" "${docker_home}"
    chmod -R g+r+w "${docker_home}"
    # setgid on directories so files created later inherit the docker group.
    find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
fi
6157
6158                    DOCKER_DEFAULT_IP6_TABLES=""
6159                    if [ "$DISABLE_IP6_TABLES" == true ]; then
6160                        requested_version=""
6161                        # checking whether the version requested either is in semver format or just a number denoting the major version
6162                        # and, extracting the major version number out of the two scenarios
6163                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
6164                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
6165                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
6166                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
6167                            requested_version=$DOCKER_VERSION
6168                        fi
6169                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
6170                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
6171                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
6172                        fi
6173                    fi
6174
6175                    if [ ! -d /usr/local/share ]; then
6176                        mkdir -p /usr/local/share
6177                    fi
6178
6179                    tee /usr/local/share/docker-init.sh > /dev/null \
6180                    << EOF
6181                    #!/bin/sh
6182                    #-------------------------------------------------------------------------------------------------------------
6183                    # Copyright (c) Microsoft Corporation. All rights reserved.
6184                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6185                    #-------------------------------------------------------------------------------------------------------------
6186
6187                    set -e
6188
6189                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
6190                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
6191                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
6192                    EOF
6193
6194                    tee -a /usr/local/share/docker-init.sh > /dev/null \
6195                    << 'EOF'
6196                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6197                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6198                        find /run /var/run -iname 'docker*.pid' -delete || :
6199                        find /run /var/run -iname 'container*.pid' -delete || :
6200
6201                        # -- Start: dind wrapper script --
6202                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6203
6204                        export container=docker
6205
6206                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6207                            mount -t securityfs none /sys/kernel/security || {
6208                                echo >&2 'Could not mount /sys/kernel/security.'
6209                                echo >&2 'AppArmor detection and --privileged mode might break.'
6210                            }
6211                        fi
6212
6213                        # Mount /tmp (conditionally)
6214                        if ! mountpoint -q /tmp; then
6215                            mount -t tmpfs none /tmp
6216                        fi
6217
6218                        set_cgroup_nesting()
6219                        {
6220                            # cgroup v2: enable nesting
6221                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6222                                # move the processes from the root group to the /init group,
6223                                # otherwise writing subtree_control fails with EBUSY.
6224                                # An error during moving non-existent process (i.e., "cat") is ignored.
6225                                mkdir -p /sys/fs/cgroup/init
6226                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6227                                # enable controllers
6228                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6229                                    > /sys/fs/cgroup/cgroup.subtree_control
6230                            fi
6231                        }
6232
6233                        # Set cgroup nesting, retrying if necessary
6234                        retry_cgroup_nesting=0
6235
6236                        until [ "${retry_cgroup_nesting}" -eq "5" ];
6237                        do
6238                            set +e
6239                                set_cgroup_nesting
6240
6241                                if [ $? -ne 0 ]; then
6242                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6243                                else
6244                                    break
6245                                fi
6246
6247                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6248                            set -e
6249                        done
6250
6251                        # -- End: dind wrapper script --
6252
6253                        # Handle DNS
6254                        set +e
6255                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6256                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6257                            then
6258                                echo "Setting dockerd Azure DNS."
6259                                CUSTOMDNS="--dns 168.63.129.16"
6260                            else
6261                                echo "Not setting dockerd DNS manually."
6262                                CUSTOMDNS=""
6263                            fi
6264                        set -e
6265
6266                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6267                        then
6268                            DEFAULT_ADDRESS_POOL=""
6269                        else
6270                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6271                        fi
6272
6273                        # Start docker/moby engine
6274                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6275                    INNEREOF
6276                    )"
6277
6278                    sudo_if() {
6279                        COMMAND="$*"
6280
6281                        if [ "$(id -u)" -ne 0 ]; then
6282                            sudo $COMMAND
6283                        else
6284                            $COMMAND
6285                        fi
6286                    }
6287
6288                    retry_docker_start_count=0
6289                    docker_ok="false"
6290
6291                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
6292                    do
6293                        # Start using sudo if not invoked as root
6294                        if [ "$(id -u)" -ne 0 ]; then
6295                            sudo /bin/sh -c "${dockerd_start}"
6296                        else
6297                            eval "${dockerd_start}"
6298                        fi
6299
6300                        retry_count=0
6301                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
6302                        do
6303                            sleep 1s
6304                            set +e
6305                                docker info > /dev/null 2>&1 && docker_ok="true"
6306                            set -e
6307
6308                            retry_count=`expr $retry_count + 1`
6309                        done
6310
6311                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6312                            echo "(*) Failed to start docker, retrying..."
6313                            set +e
6314                                sudo_if pkill dockerd
6315                                sudo_if pkill containerd
6316                            set -e
6317                        fi
6318
6319                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
6320                    done
6321
6322                    # Execute whatever commands were passed in (if any). This allows us
6323                    # to set this script to ENTRYPOINT while still executing the default CMD.
6324                    exec "$@"
6325                    EOF
6326
6327                    chmod +x /usr/local/share/docker-init.sh
6328                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
6329
6330                    # Clean up
6331                    rm -rf /var/lib/apt/lists/*
6332
6333                    echo 'docker-in-docker-debian script has completed!'"#),
6334                ]).await;
6335
6336                return Ok(http::Response::builder()
6337                    .status(200)
6338                    .body(AsyncBody::from(response))
6339                    .unwrap());
6340            }
6341            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6342                let response = r#"
6343                    {
6344                        "schemaVersion": 2,
6345                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6346                        "config": {
6347                            "mediaType": "application/vnd.devcontainers",
6348                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6349                            "size": 2
6350                        },
6351                        "layers": [
6352                            {
6353                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6354                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6355                                "size": 20992,
6356                                "annotations": {
6357                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6358                                }
6359                            }
6360                        ],
6361                        "annotations": {
6362                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6363                            "com.github.package.type": "devcontainer_feature"
6364                        }
6365                    }
6366                    "#;
6367
6368                return Ok(http::Response::builder()
6369                    .status(200)
6370                    .body(http_client::AsyncBody::from(response))
6371                    .unwrap());
6372            }
6373            if parts.uri.path()
6374                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6375            {
6376                let response = build_tarball(vec![
6377                    ("./devcontainer-feature.json", r#"
6378                        {
6379                            "id": "go",
6380                            "version": "1.3.3",
6381                            "name": "Go",
6382                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6383                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6384                            "options": {
6385                                "version": {
6386                                    "type": "string",
6387                                    "proposals": [
6388                                        "latest",
6389                                        "none",
6390                                        "1.24",
6391                                        "1.23"
6392                                    ],
6393                                    "default": "latest",
6394                                    "description": "Select or enter a Go version to install"
6395                                },
6396                                "golangciLintVersion": {
6397                                    "type": "string",
6398                                    "default": "latest",
6399                                    "description": "Version of golangci-lint to install"
6400                                }
6401                            },
6402                            "init": true,
6403                            "customizations": {
6404                                "vscode": {
6405                                    "extensions": [
6406                                        "golang.Go"
6407                                    ],
6408                                    "settings": {
6409                                        "github.copilot.chat.codeGeneration.instructions": [
6410                                            {
6411                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6412                                            }
6413                                        ]
6414                                    }
6415                                }
6416                            },
6417                            "containerEnv": {
6418                                "GOROOT": "/usr/local/go",
6419                                "GOPATH": "/go",
6420                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6421                            },
6422                            "capAdd": [
6423                                "SYS_PTRACE"
6424                            ],
6425                            "securityOpt": [
6426                                "seccomp=unconfined"
6427                            ],
6428                            "installsAfter": [
6429                                "ghcr.io/devcontainers/features/common-utils"
6430                            ]
6431                        }
6432                        "#),
6433                    ("./install.sh", r#"
6434                    #!/usr/bin/env bash
6435                    #-------------------------------------------------------------------------------------------------------------
6436                    # Copyright (c) Microsoft Corporation. All rights reserved.
6437                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6438                    #-------------------------------------------------------------------------------------------------------------
6439                    #
6440                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6441                    # Maintainer: The VS Code and Codespaces Teams
6442
6443                    TARGET_GO_VERSION="${VERSION:-"latest"}"
6444                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6445
6446                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6447                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6448                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6449                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6450
6451                    # https://www.google.com/linuxrepositories/
6452                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6453
6454                    set -e
6455
6456                    if [ "$(id -u)" -ne 0 ]; then
6457                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6458                        exit 1
6459                    fi
6460
6461                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6462                    . /etc/os-release
6463                    # Get an adjusted ID independent of distro variants
6464                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6465                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6466                        ADJUSTED_ID="debian"
6467                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6468                        ADJUSTED_ID="rhel"
6469                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6470                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6471                        else
6472                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6473                        fi
6474                    else
6475                        echo "Linux distro ${ID} not supported."
6476                        exit 1
6477                    fi
6478
6479                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6480                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6481                        # Update the repo files to reference vault.centos.org.
6482                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6483                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6484                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6485                    fi
6486
6487                    # Setup INSTALL_CMD & PKG_MGR_CMD
6488                    if type apt-get > /dev/null 2>&1; then
6489                        PKG_MGR_CMD=apt-get
6490                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6491                    elif type microdnf > /dev/null 2>&1; then
6492                        PKG_MGR_CMD=microdnf
6493                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6494                    elif type dnf > /dev/null 2>&1; then
6495                        PKG_MGR_CMD=dnf
6496                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6497                    else
6498                        PKG_MGR_CMD=yum
6499                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6500                    fi
6501
6502                    # Clean up
6503                    clean_up() {
6504                        case ${ADJUSTED_ID} in
6505                            debian)
6506                                rm -rf /var/lib/apt/lists/*
6507                                ;;
6508                            rhel)
6509                                rm -rf /var/cache/dnf/* /var/cache/yum/*
6510                                rm -rf /tmp/yum.log
6511                                rm -rf ${GPG_INSTALL_PATH}
6512                                ;;
6513                        esac
6514                    }
6515                    clean_up
6516
6517
6518                    # Figure out correct version of a three part version number is not passed
6519                    find_version_from_git_tags() {
6520                        local variable_name=$1
6521                        local requested_version=${!variable_name}
6522                        if [ "${requested_version}" = "none" ]; then return; fi
6523                        local repository=$2
6524                        local prefix=${3:-"tags/v"}
6525                        local separator=${4:-"."}
6526                        local last_part_optional=${5:-"false"}
6527                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6528                            local escaped_separator=${separator//./\\.}
6529                            local last_part
6530                            if [ "${last_part_optional}" = "true" ]; then
6531                                last_part="(${escaped_separator}[0-9]+)?"
6532                            else
6533                                last_part="${escaped_separator}[0-9]+"
6534                            fi
6535                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6536                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6537                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6538                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6539                            else
6540                                set +e
6541                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6542                                set -e
6543                            fi
6544                        fi
6545                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6546                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6547                            exit 1
6548                        fi
6549                        echo "${variable_name}=${!variable_name}"
6550                    }
6551
6552                    pkg_mgr_update() {
6553                        case $ADJUSTED_ID in
6554                            debian)
6555                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6556                                    echo "Running apt-get update..."
6557                                    ${PKG_MGR_CMD} update -y
6558                                fi
6559                                ;;
6560                            rhel)
6561                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6562                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6563                                        echo "Running ${PKG_MGR_CMD} makecache ..."
6564                                        ${PKG_MGR_CMD} makecache
6565                                    fi
6566                                else
6567                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6568                                        echo "Running ${PKG_MGR_CMD} check-update ..."
6569                                        set +e
6570                                        ${PKG_MGR_CMD} check-update
6571                                        rc=$?
6572                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
6573                                            exit 1
6574                                        fi
6575                                        set -e
6576                                    fi
6577                                fi
6578                                ;;
6579                        esac
6580                    }
6581
6582                    # Checks if packages are installed and installs them if not
6583                    check_packages() {
6584                        case ${ADJUSTED_ID} in
6585                            debian)
6586                                if ! dpkg -s "$@" > /dev/null 2>&1; then
6587                                    pkg_mgr_update
6588                                    ${INSTALL_CMD} "$@"
6589                                fi
6590                                ;;
6591                            rhel)
6592                                if ! rpm -q "$@" > /dev/null 2>&1; then
6593                                    pkg_mgr_update
6594                                    ${INSTALL_CMD} "$@"
6595                                fi
6596                                ;;
6597                        esac
6598                    }
6599
6600                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6601                    rm -f /etc/profile.d/00-restore-env.sh
6602                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6603                    chmod +x /etc/profile.d/00-restore-env.sh
6604
6605                    # Some distributions do not install awk by default (e.g. Mariner)
6606                    if ! type awk >/dev/null 2>&1; then
6607                        check_packages awk
6608                    fi
6609
6610                    # Determine the appropriate non-root user
6611                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6612                        USERNAME=""
6613                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6614                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6615                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6616                                USERNAME=${CURRENT_USER}
6617                                break
6618                            fi
6619                        done
6620                        if [ "${USERNAME}" = "" ]; then
6621                            USERNAME=root
6622                        fi
6623                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6624                        USERNAME=root
6625                    fi
6626
6627                    export DEBIAN_FRONTEND=noninteractive
6628
6629                    check_packages ca-certificates gnupg2 tar gcc make pkg-config
6630
6631                    if [ $ADJUSTED_ID = "debian" ]; then
6632                        check_packages g++ libc6-dev
6633                    else
6634                        check_packages gcc-c++ glibc-devel
6635                    fi
6636                    # Install curl, git, other dependencies if missing
6637                    if ! type curl > /dev/null 2>&1; then
6638                        check_packages curl
6639                    fi
6640                    if ! type git > /dev/null 2>&1; then
6641                        check_packages git
6642                    fi
6643                    # Some systems, e.g. Mariner, still a few more packages
6644                    if ! type as > /dev/null 2>&1; then
6645                        check_packages binutils
6646                    fi
6647                    if ! [ -f /usr/include/linux/errno.h ]; then
6648                        check_packages kernel-headers
6649                    fi
6650                    # Minimal RHEL install may need findutils installed
6651                    if ! [ -f /usr/bin/find ]; then
6652                        check_packages findutils
6653                    fi
6654
6655                    # Get closest match for version number specified
6656                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6657
6658                    architecture="$(uname -m)"
6659                    case $architecture in
6660                        x86_64) architecture="amd64";;
6661                        aarch64 | armv8*) architecture="arm64";;
6662                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
6663                        i?86) architecture="386";;
6664                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6665                    esac
6666
6667                    # Install Go
6668                    umask 0002
6669                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6670                        groupadd -r golang
6671                    fi
6672                    usermod -a -G golang "${USERNAME}"
6673                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6674
6675                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6676                        # Use a temporary location for gpg keys to avoid polluting image
6677                        export GNUPGHOME="/tmp/tmp-gnupg"
6678                        mkdir -p ${GNUPGHOME}
6679                        chmod 700 ${GNUPGHOME}
6680                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6681                        gpg -q --import /tmp/tmp-gnupg/golang_key
6682                        echo "Downloading Go ${TARGET_GO_VERSION}..."
6683                        set +e
6684                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6685                        exit_code=$?
6686                        set -e
6687                        if [ "$exit_code" != "0" ]; then
6688                            echo "(!) Download failed."
6689                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6690                            set +e
6691                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6692                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6693                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6694                            # Handle Go's odd version pattern where "0" releases omit the last part
6695                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6696                                ((minor=minor-1))
6697                                TARGET_GO_VERSION="${major}.${minor}"
6698                                # Look for latest version from previous minor release
6699                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6700                            else
6701                                ((breakfix=breakfix-1))
6702                                if [ "${breakfix}" = "0" ]; then
6703                                    TARGET_GO_VERSION="${major}.${minor}"
6704                                else
6705                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6706                                fi
6707                            fi
6708                            set -e
6709                            echo "Trying ${TARGET_GO_VERSION}..."
6710                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6711                        fi
6712                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6713                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6714                        echo "Extracting Go ${TARGET_GO_VERSION}..."
6715                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6716                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6717                    else
6718                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6719                    fi
6720
6721                    # Install Go tools that are isImportant && !replacedByGopls based on
6722                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6723                    GO_TOOLS="\
6724                        golang.org/x/tools/gopls@latest \
6725                        honnef.co/go/tools/cmd/staticcheck@latest \
6726                        golang.org/x/lint/golint@latest \
6727                        github.com/mgechev/revive@latest \
6728                        github.com/go-delve/delve/cmd/dlv@latest \
6729                        github.com/fatih/gomodifytags@latest \
6730                        github.com/haya14busa/goplay/cmd/goplay@latest \
6731                        github.com/cweill/gotests/gotests@latest \
6732                        github.com/josharian/impl@latest"
6733
6734                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6735                        echo "Installing common Go tools..."
6736                        export PATH=${TARGET_GOROOT}/bin:${PATH}
6737                        export GOPATH=/tmp/gotools
6738                        export GOCACHE="${GOPATH}/cache"
6739
6740                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6741                        cd "${GOPATH}"
6742
6743                        # Use go get for versions of go under 1.16
6744                        go_install_command=install
6745                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6746                            export GO111MODULE=on
6747                            go_install_command=get
6748                            echo "Go version < 1.16, using go get."
6749                        fi
6750
6751                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6752
6753                        # Move Go tools into path
6754                        if [ -d "${GOPATH}/bin" ]; then
6755                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6756                        fi
6757
6758                        # Install golangci-lint from precompiled binaries
6759                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6760                            echo "Installing golangci-lint latest..."
6761                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6762                                sh -s -- -b "${TARGET_GOPATH}/bin"
6763                        else
6764                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6765                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6766                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6767                        fi
6768
6769                        # Remove Go tools temp directory
6770                        rm -rf "${GOPATH}"
6771                    fi
6772
6773
6774                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6775                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6776                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6777                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6778
6779                    # Clean up
6780                    clean_up
6781
6782                    echo "Done!"
6783                        "#),
6784                ])
6785                .await;
6786                return Ok(http::Response::builder()
6787                    .status(200)
6788                    .body(AsyncBody::from(response))
6789                    .unwrap());
6790            }
6791            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                // Mock registry route: OCI image manifest (schemaVersion 2) for the
                // `aws-cli` dev-container feature at tag `1`. The
                // `dev.containers.metadata` annotation embeds the feature's
                // devcontainer-feature.json as an escaped JSON string, and the single
                // layer's digest matches the blob route served below in this handler.
6792                let response = r#"
6793                    {
6794                        "schemaVersion": 2,
6795                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6796                        "config": {
6797                            "mediaType": "application/vnd.devcontainers",
6798                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6799                            "size": 2
6800                        },
6801                        "layers": [
6802                            {
6803                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6804                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6805                                "size": 19968,
6806                                "annotations": {
6807                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6808                                }
6809                            }
6810                        ],
6811                        "annotations": {
6812                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6813                            "com.github.package.type": "devcontainer_feature"
6814                        }
6815                    }"#;
                // Serve the manifest verbatim with HTTP 200.
6816                return Ok(http::Response::builder()
6817                    .status(200)
6818                    .body(AsyncBody::from(response))
6819                    .unwrap());
6820            }
6821            if parts.uri.path()
6822                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6823            {
                // Mock blob route for the layer digest referenced by the `aws-cli`
                // manifest route above: returns a tarball assembled in-memory by the
                // `build_tarball` test helper from (path, contents) pairs.
                // The string contents below are fixture data — keep them byte-exact.
6824                let response = build_tarball(vec![
                    // Feature metadata; mirrors the manifest's
                    // `dev.containers.metadata` annotation.
6825                    (
6826                        "./devcontainer-feature.json",
6827                        r#"
6828{
6829    "id": "aws-cli",
6830    "version": "1.1.3",
6831    "name": "AWS CLI",
6832    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6833    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6834    "options": {
6835        "version": {
6836            "type": "string",
6837            "proposals": [
6838                "latest"
6839            ],
6840            "default": "latest",
6841            "description": "Select or enter an AWS CLI version."
6842        },
6843        "verbose": {
6844            "type": "boolean",
6845            "default": true,
6846            "description": "Suppress verbose output."
6847        }
6848    },
6849    "customizations": {
6850        "vscode": {
6851            "extensions": [
6852                "AmazonWebServices.aws-toolkit-vscode"
6853            ],
6854            "settings": {
6855                "github.copilot.chat.codeGeneration.instructions": [
6856                    {
6857                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6858                    }
6859                ]
6860            }
6861        }
6862    },
6863    "installsAfter": [
6864        "ghcr.io/devcontainers/features/common-utils"
6865    ]
6866}
6867                    "#,
6868                    ),
                    // Feature install script; appears to be the upstream
                    // devcontainers `aws-cli` installer, kept verbatim as fixture
                    // data (including its original comments) — do not edit.
6869                    (
6870                        "./install.sh",
6871                        r#"#!/usr/bin/env bash
6872                    #-------------------------------------------------------------------------------------------------------------
6873                    # Copyright (c) Microsoft Corporation. All rights reserved.
6874                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6875                    #-------------------------------------------------------------------------------------------------------------
6876                    #
6877                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6878                    # Maintainer: The VS Code and Codespaces Teams
6879
6880                    set -e
6881
6882                    # Clean up
6883                    rm -rf /var/lib/apt/lists/*
6884
6885                    VERSION=${VERSION:-"latest"}
6886                    VERBOSE=${VERBOSE:-"true"}
6887
6888                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6889                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6890
6891                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6892                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6893                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6894                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6895                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6896                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6897                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6898                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6899                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6900                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6901                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6902                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6903                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6904                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6905                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6906                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6907                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6908                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6909                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6910                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6911                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6912                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6913                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6914                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6915                    YLZATHZKTJyiqA==
6916                    =vYOk
6917                    -----END PGP PUBLIC KEY BLOCK-----"
6918
6919                    if [ "$(id -u)" -ne 0 ]; then
6920                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6921                        exit 1
6922                    fi
6923
6924                    apt_get_update()
6925                    {
6926                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6927                            echo "Running apt-get update..."
6928                            apt-get update -y
6929                        fi
6930                    }
6931
6932                    # Checks if packages are installed and installs them if not
6933                    check_packages() {
6934                        if ! dpkg -s "$@" > /dev/null 2>&1; then
6935                            apt_get_update
6936                            apt-get -y install --no-install-recommends "$@"
6937                        fi
6938                    }
6939
6940                    export DEBIAN_FRONTEND=noninteractive
6941
6942                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6943
6944                    verify_aws_cli_gpg_signature() {
6945                        local filePath=$1
6946                        local sigFilePath=$2
6947                        local awsGpgKeyring=aws-cli-public-key.gpg
6948
6949                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6950                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6951                        local status=$?
6952
6953                        rm "./${awsGpgKeyring}"
6954
6955                        return ${status}
6956                    }
6957
6958                    install() {
6959                        local scriptZipFile=awscli.zip
6960                        local scriptSigFile=awscli.sig
6961
6962                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6963                        if [ "${VERSION}" != "latest" ]; then
6964                            local versionStr=-${VERSION}
6965                        fi
6966                        architecture=$(dpkg --print-architecture)
6967                        case "${architecture}" in
6968                            amd64) architectureStr=x86_64 ;;
6969                            arm64) architectureStr=aarch64 ;;
6970                            *)
6971                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6972                                exit 1
6973                        esac
6974                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6975                        curl "${scriptUrl}" -o "${scriptZipFile}"
6976                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6977
6978                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6979                        if (( $? > 0 )); then
6980                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6981                            exit 1
6982                        fi
6983
6984                        if [ "${VERBOSE}" = "false" ]; then
6985                            unzip -q "${scriptZipFile}"
6986                        else
6987                            unzip "${scriptZipFile}"
6988                        fi
6989
6990                        ./aws/install
6991
6992                        # kubectl bash completion
6993                        mkdir -p /etc/bash_completion.d
6994                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6995
6996                        # kubectl zsh completion
6997                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6998                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6999                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7000                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7001                        fi
7002
7003                        rm -rf ./aws
7004                    }
7005
7006                    echo "(*) Installing AWS CLI..."
7007
7008                    install
7009
7010                    # Clean up
7011                    rm -rf /var/lib/apt/lists/*
7012
7013                    echo "Done!""#,
7014                    ),
                    // NOTE(review): entries with a trailing '/' and empty contents
                    // presumably produce directory entries in the tarball — confirm
                    // against the build_tarball helper's semantics.
7015                    ("./scripts/", r#""#),
7016                    (
7017                        "./scripts/fetch-latest-completer-scripts.sh",
7018                        r#"
7019                        #!/bin/bash
7020                        #-------------------------------------------------------------------------------------------------------------
7021                        # Copyright (c) Microsoft Corporation. All rights reserved.
7022                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7023                        #-------------------------------------------------------------------------------------------------------------
7024                        #
7025                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7026                        # Maintainer: The Dev Container spec maintainers
7027                        #
7028                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7029                        #
7030                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7031                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7032                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7033
7034                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7035                        chmod +x "$BASH_COMPLETER_SCRIPT"
7036
7037                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7038                        chmod +x "$ZSH_COMPLETER_SCRIPT"
7039                        "#,
7040                    ),
7041                    ("./scripts/vendor/", r#""#),
7042                    (
7043                        "./scripts/vendor/aws_bash_completer",
7044                        r#"
7045                        # Typically that would be added under one of the following paths:
7046                        # - /etc/bash_completion.d
7047                        # - /usr/local/etc/bash_completion.d
7048                        # - /usr/share/bash-completion/completions
7049
7050                        complete -C aws_completer aws
7051                        "#,
7052                    ),
7053                    (
7054                        "./scripts/vendor/aws_zsh_completer.sh",
7055                        r#"
7056                        # Source this file to activate auto completion for zsh using the bash
7057                        # compatibility helper.  Make sure to run `compinit` before, which should be
7058                        # given usually.
7059                        #
7060                        # % source /path/to/zsh_complete.sh
7061                        #
7062                        # Typically that would be called somewhere in your .zshrc.
7063                        #
7064                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7065                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7066                        #
7067                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7068                        #
7069                        # zsh releases prior to that version do not export the required env variables!
7070
7071                        autoload -Uz bashcompinit
7072                        bashcompinit -i
7073
7074                        _bash_complete() {
7075                          local ret=1
7076                          local -a suf matches
7077                          local -x COMP_POINT COMP_CWORD
7078                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7079                          local -x COMP_LINE="$words"
7080                          local -A savejobstates savejobtexts
7081
7082                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7083                          (( COMP_CWORD = CURRENT - 1))
7084                          COMP_WORDS=( $words )
7085                          BASH_VERSINFO=( 2 05b 0 1 release )
7086
7087                          savejobstates=( ${(kv)jobstates} )
7088                          savejobtexts=( ${(kv)jobtexts} )
7089
7090                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7091
7092                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7093
7094                          if [[ -n $matches ]]; then
7095                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7096                              compset -P '*/' && matches=( ${matches##*/} )
7097                              compset -S '/*' && matches=( ${matches%%/*} )
7098                              compadd -Q -f "${suf[@]}" -a matches && ret=0
7099                            else
7100                              compadd -Q "${suf[@]}" -a matches && ret=0
7101                            fi
7102                          fi
7103
7104                          if (( ret )); then
7105                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7106                              _default "${suf[@]}" && ret=0
7107                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7108                              _directories "${suf[@]}" && ret=0
7109                            fi
7110                          fi
7111
7112                          return ret
7113                        }
7114
7115                        complete -C aws_completer aws
7116                        "#,
7117                    ),
7118                ]).await;
7119
                // Serve the tarball bytes with HTTP 200.
7120                return Ok(http::Response::builder()
7121                    .status(200)
7122                    .body(AsyncBody::from(response))
7123                    .unwrap());
7124            }
7125
            // Fallback: any request path not matched by the mock routes above
            // gets an empty 404 response.
7126            Ok(http::Response::builder()
7127                .status(404)
7128                .body(http_client::AsyncBody::default())
7129                .unwrap())
7130        })
7131    }
7132}