devcontainer_manifest.rs

   1use std::{
   2    collections::HashMap,
   3    fmt::Debug,
   4    hash::{DefaultHasher, Hash, Hasher},
   5    path::{Path, PathBuf},
   6    sync::Arc,
   7};
   8
   9use regex::Regex;
  10
  11use fs::Fs;
  12use http_client::HttpClient;
  13use util::{ResultExt, command::Command, normalize_path};
  14
  15use crate::{
  16    DevContainerConfig, DevContainerContext,
  17    command_json::{CommandRunner, DefaultCommandRunner},
  18    devcontainer_api::{DevContainerError, DevContainerUp},
  19    devcontainer_json::{
  20        ContainerBuild, DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort,
  21        MountDefinition, deserialize_devcontainer_json, deserialize_devcontainer_json_from_value,
  22        deserialize_devcontainer_json_to_value,
  23    },
  24    docker::{
  25        Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
  26        DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
  27    },
  28    features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
  29    get_oci_token,
  30    oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
  31    safe_id_lower,
  32};
  33
/// Tracks how far the devcontainer configuration has been processed:
/// freshly deserialized JSON vs. JSON with `${...}` variable substitution
/// already applied (see `parse_nonremote_vars`).
enum ConfigStatus {
    /// Parsed straight from `devcontainer.json`; variables not yet expanded.
    Deserialized(DevContainer),
    /// The same config after non-remote variable expansion has run.
    VariableParsed(DevContainer),
}
  38
/// Compose files plus their merged, parsed configuration, used when the
/// devcontainer is backed by docker-compose.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Paths of the compose files that contributed to `config`.
    files: Vec<PathBuf>,
    // The parsed compose configuration derived from `files`.
    config: DockerComposeConfig,
}
  44
/// Orchestrates turning a parsed `devcontainer.json` into runnable Docker
/// resources: variable expansion, OCI feature download, and extended
/// Dockerfile generation.
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // The devcontainer.json contents exactly as loaded from disk.
    raw_config: String,
    // Parsed config, tagged with whether variable expansion has run yet.
    config: ConfigStatus,
    // Host-side environment, used for `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Project root on the host machine.
    local_project_directory: PathBuf,
    // Directory on the host that contains the devcontainer config file.
    config_directory: PathBuf,
    // File name of the config inside `config_directory`.
    file_name: String,
    // Inspect data for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths and image tag for the generated features build; populated
    // alongside `root_image`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Manifests of downloaded features, in resolved install order.
    features: Vec<FeatureManifest>,
}
// Default parent directory for project workspaces inside the container
// (`/workspaces` matches the Dev Container spec's conventional default).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
  61impl DevContainerManifest {
  62    async fn new(
  63        context: &DevContainerContext,
  64        environment: HashMap<String, String>,
  65        docker_client: Arc<dyn DockerClient>,
  66        command_runner: Arc<dyn CommandRunner>,
  67        local_config: DevContainerConfig,
  68        local_project_path: &Path,
  69    ) -> Result<Self, DevContainerError> {
  70        let config_path = local_project_path.join(local_config.config_path.clone());
  71        log::debug!("parsing devcontainer json found in {:?}", &config_path);
  72        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
  73            log::error!("Unable to read devcontainer contents: {e}");
  74            DevContainerError::DevContainerParseFailed
  75        })?;
  76
  77        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
  78
  79        let devcontainer_directory = config_path.parent().ok_or_else(|| {
  80            log::error!("Dev container file should be in a directory");
  81            DevContainerError::NotInValidProject
  82        })?;
  83        let file_name = config_path
  84            .file_name()
  85            .and_then(|f| f.to_str())
  86            .ok_or_else(|| {
  87                log::error!("Dev container file has no file name, or is invalid unicode");
  88                DevContainerError::DevContainerParseFailed
  89            })?;
  90
  91        Ok(Self {
  92            fs: context.fs.clone(),
  93            http_client: context.http_client.clone(),
  94            docker_client,
  95            command_runner,
  96            raw_config: devcontainer_contents,
  97            config: ConfigStatus::Deserialized(devcontainer),
  98            local_project_directory: local_project_path.to_path_buf(),
  99            local_environment: environment,
 100            config_directory: devcontainer_directory.to_path_buf(),
 101            file_name: file_name.to_string(),
 102            root_image: None,
 103            features_build_info: None,
 104            features: Vec::new(),
 105        })
 106    }
 107
 108    fn devcontainer_id(&self) -> String {
 109        let mut labels = self.identifying_labels();
 110        labels.sort_by_key(|(key, _)| *key);
 111
 112        let mut hasher = DefaultHasher::new();
 113        for (key, value) in &labels {
 114            key.hash(&mut hasher);
 115            value.hash(&mut hasher);
 116        }
 117
 118        format!("{:016x}", hasher.finish())
 119    }
 120
 121    fn identifying_labels(&self) -> Vec<(&str, String)> {
 122        let labels = vec![
 123            (
 124                "devcontainer.local_folder",
 125                (self.local_project_directory.display()).to_string(),
 126            ),
 127            (
 128                "devcontainer.config_file",
 129                (self.config_file().display()).to_string(),
 130            ),
 131        ];
 132        labels
 133    }
 134
    /// Expands the `${...}` variables that can be resolved without a running
    /// container — `devcontainerId`, the workspace-folder tokens, and
    /// `${localEnv:...}` — in every string value of the given JSON document.
    ///
    /// Container-side variables such as `${containerEnv:...}` are left
    /// untouched here; they are substituted later (see `runtime_remote_env`).
    fn parse_nonremote_vars_for_content(
        &self,
        content: &str,
    ) -> Result<serde_json_lenient::Value, DevContainerError> {
        let mut value = deserialize_devcontainer_json_to_value(content)?;
        // Iterative worklist traversal of the JSON tree (no recursion).
        let mut to_visit = vec![&mut value];

        while let Some(value) = to_visit.pop() {
            use serde_json_lenient::Value;

            match value {
                Value::String(string) => {
                    // The `...FolderBasename` tokens cannot be clobbered by the
                    // shorter `...Folder` tokens because each `replace` matches
                    // the full token including its closing `}`.
                    *string = string
                        .replace("${devcontainerId}", &self.devcontainer_id())
                        .replace(
                            "${containerWorkspaceFolderBasename}",
                            &self.remote_workspace_base_name().unwrap_or_default(),
                        )
                        .replace(
                            "${localWorkspaceFolderBasename}",
                            &self.local_workspace_base_name()?,
                        )
                        .replace(
                            "${containerWorkspaceFolder}",
                            &self
                                .remote_workspace_folder()
                                .map(|path| path.display().to_string())
                                .unwrap_or_default()
                                // Normalize Windows separators for in-container paths.
                                .replace('\\', "/"),
                        )
                        .replace(
                            "${localWorkspaceFolder}",
                            &self.local_workspace_folder().replace('\\', "/"),
                        );
                    // `${localEnv:VAR[:default]}` resolves against the host env.
                    *string = Self::replace_environment_variables(
                        string,
                        "localEnv",
                        &self.local_environment,
                    );
                }

                Value::Array(array) => to_visit.extend(array.iter_mut()),
                Value::Object(object) => to_visit.extend(object.values_mut()),

                Value::Null | Value::Bool(_) | Value::Number(_) => {}
            }
        }

        Ok(value)
    }
 185
 186    fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
 187        let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
 188        let parsed_config = deserialize_devcontainer_json_from_value(replaced_content)?;
 189
 190        self.config = ConfigStatus::VariableParsed(parsed_config);
 191
 192        Ok(())
 193    }
 194
 195    fn runtime_remote_env(
 196        &self,
 197        container_env: &HashMap<String, String>,
 198    ) -> Result<HashMap<String, String>, DevContainerError> {
 199        let mut merged_remote_env = container_env.clone();
 200        // HOME is user-specific, and we will often not run as the image user
 201        merged_remote_env.remove("HOME");
 202        if let Some(mut remote_env) = self.dev_container().remote_env.clone() {
 203            remote_env.values_mut().for_each(|value| {
 204                *value = Self::replace_environment_variables(value, "containerEnv", &container_env)
 205            });
 206            for (k, v) in remote_env {
 207                merged_remote_env.insert(k, v);
 208            }
 209        }
 210        Ok(merged_remote_env)
 211    }
 212
 213    fn replace_environment_variables(
 214        mut orig: &str,
 215        environment_source: &str,
 216        environment: &HashMap<String, String>,
 217    ) -> String {
 218        let mut replaced = String::with_capacity(orig.len());
 219        let prefix = format!("${{{environment_source}:");
 220        while let Some(start) = orig.find(&prefix) {
 221            let var_name_start = start + prefix.len();
 222            let Some(end) = orig[var_name_start..].find('}') else {
 223                // No closing `}` => malformed variable reference => paste as is.
 224                break;
 225            };
 226            let end = var_name_start + end;
 227
 228            let (var_name_end, default_start) =
 229                if let Some(var_name_end) = orig[var_name_start..end].find(':') {
 230                    let var_name_end = var_name_start + var_name_end;
 231                    (var_name_end, var_name_end + 1)
 232                } else {
 233                    (end, end)
 234                };
 235
 236            let var_name = &orig[var_name_start..var_name_end];
 237            if var_name.is_empty() {
 238                // Empty variable name => paste as is.
 239                replaced.push_str(&orig[..end + 1]);
 240                orig = &orig[end + 1..];
 241                continue;
 242            }
 243            let default = &orig[default_start..end];
 244
 245            replaced.push_str(&orig[..start]);
 246            replaced.push_str(
 247                environment
 248                    .get(var_name)
 249                    .map(|value| value.as_str())
 250                    .unwrap_or(default),
 251            );
 252            orig = &orig[end + 1..];
 253        }
 254        replaced.push_str(orig);
 255        replaced
 256    }
 257
 258    fn config_file(&self) -> PathBuf {
 259        self.config_directory.join(&self.file_name)
 260    }
 261
 262    fn dev_container(&self) -> &DevContainer {
 263        match &self.config {
 264            ConfigStatus::Deserialized(dev_container) => dev_container,
 265            ConfigStatus::VariableParsed(dev_container) => dev_container,
 266        }
 267    }
 268
 269    async fn dockerfile_location(&self) -> Option<PathBuf> {
 270        let dev_container = self.dev_container();
 271        match dev_container.build_type() {
 272            DevContainerBuildType::Image(_) => None,
 273            DevContainerBuildType::Dockerfile(build) => {
 274                Some(self.config_directory.join(&build.dockerfile))
 275            }
 276            DevContainerBuildType::DockerCompose => {
 277                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
 278                    return None;
 279                };
 280                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
 281                else {
 282                    return None;
 283                };
 284                main_service.build.and_then(|b| {
 285                    let compose_file = docker_compose_manifest.files.first()?;
 286                    resolve_compose_dockerfile(
 287                        compose_file,
 288                        b.context.as_deref(),
 289                        b.dockerfile.as_deref()?,
 290                    )
 291                })
 292            }
 293            DevContainerBuildType::None => None,
 294        }
 295    }
 296
    /// Derives a docker image tag for the features build: a short prefix from
    /// the container name (or `zed-dc` as a fallback) plus a hash of the
    /// extended Dockerfile's build path.
    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
        let mut hasher = DefaultHasher::new();
        let prefix = match &self.dev_container().name {
            Some(name) => &safe_id_lower(name),
            None => "zed-dc",
        };
        // Keep at most the first 6 bytes; `get` returns None (keeping the
        // whole prefix) when byte 6 is not a UTF-8 char boundary.
        let prefix = prefix.get(..6).unwrap_or(prefix);
        // Strip non-alphanumeric edge characters so the tag stays a valid
        // docker reference component.
        let prefix = prefix.trim_matches(|c: char| !c.is_alphanumeric());

        dockerfile_build_path.hash(&mut hasher);

        // NOTE(review): DefaultHasher output is not stable across Rust
        // releases, so this tag can change after a toolchain upgrade.
        let hash = hasher.finish();
        format!("{}-{:x}-features", prefix, hash)
    }
 311
 312    /// Gets the base image from the devcontainer with the following precedence:
 313    /// - The devcontainer image if an image is specified
 314    /// - The image sourced in the Dockerfile if a Dockerfile is specified
 315    /// - The image sourced in the docker-compose main service, if one is specified
 316    /// - The image sourced in the docker-compose main service dockerfile, if one is specified
 317    /// If no such image is available, return an error
 318    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
 319        match self.dev_container().build_type() {
 320            DevContainerBuildType::Image(image) => {
 321                return Ok(image);
 322            }
 323            DevContainerBuildType::Dockerfile(build) => {
 324                let dockerfile_contents = self.expanded_dockerfile_content().await?;
 325                return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
 326                    || {
 327                        log::error!("Unable to find base image in Dockerfile");
 328                        DevContainerError::DevContainerParseFailed
 329                    },
 330                );
 331            }
 332            DevContainerBuildType::DockerCompose => {
 333                let docker_compose_manifest = self.docker_compose_manifest().await?;
 334                let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
 335
 336                if let Some(_) = main_service
 337                    .build
 338                    .as_ref()
 339                    .and_then(|b| b.dockerfile.as_ref())
 340                {
 341                    let dockerfile_contents = self.expanded_dockerfile_content().await?;
 342                    return image_from_dockerfile(
 343                        dockerfile_contents,
 344                        &main_service.build.as_ref().and_then(|b| b.target.clone()),
 345                    )
 346                    .ok_or_else(|| {
 347                        log::error!("Unable to find base image in Dockerfile");
 348                        DevContainerError::DevContainerParseFailed
 349                    });
 350                }
 351                if let Some(image) = &main_service.image {
 352                    return Ok(image.to_string());
 353                }
 354
 355                log::error!("No valid base image found in docker-compose configuration");
 356                return Err(DevContainerError::DevContainerParseFailed);
 357            }
 358            DevContainerBuildType::None => {
 359                log::error!("Not a valid devcontainer config for build");
 360                return Err(DevContainerError::NotInValidProject);
 361            }
 362        }
 363    }
 364
    /// Fetches every OCI feature referenced by the config into a temp
    /// directory, prepares their install wrappers and env files, and writes
    /// the extended Dockerfile that layers them on the base image.
    ///
    /// Requires `parse_nonremote_vars` to have run. On success, populates
    /// `self.root_image`, `self.features_build_info`, and `self.features`.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Phase 1: lay out scratch directories for the features build.
        // The timestamp keeps concurrent/successive builds from colliding.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Built-in env consumed by feature install scripts.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        // Phase 2: download each feature's OCI layer and stage its install
        // wrapper + env file under a per-feature directory.
        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // `<id>_<index>` keeps directories unique even when the same
            // feature id appears more than once.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Registry flow: token -> manifest -> first layer tarball.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Features are published as a single-layer tar; only the first
            // layer is used here.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata gets the same variable expansion as the main
            // config before being deserialized.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_value(contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit is assumed available for non-compose builds; compose builds
        // depend on the client's capability check.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Missing/unreadable user Dockerfile is non-fatal: the extended
        // Dockerfile is then generated from the base image alone.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
 622
    /// Renders the extended Dockerfile that layers the downloaded features on
    /// top of the user's base image / Dockerfile content.
    ///
    /// `dockerfile_content` is the (possibly empty) user Dockerfile with the
    /// stage alias already injected; `use_buildkit` switches between a
    /// BuildKit build-context `COPY --from` and a legacy temp-image stage.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is POSIX-only; never attempt it on Windows hosts.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile snippet per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell fragments that look up each user's home dir from the passwd db.
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content must be staged through a regular
        // image stage instead of an additional build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): `ENV` is appended with no leading newline of its
            // own, so this relies on the preceding fragment (the sed block or
            // `generate_dockerfile_env` output) ending in '\n' — confirm.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
 714
 715    fn build_merged_resources(
 716        &self,
 717        base_image: DockerInspect,
 718    ) -> Result<DockerBuildResources, DevContainerError> {
 719        let dev_container = match &self.config {
 720            ConfigStatus::Deserialized(_) => {
 721                log::error!(
 722                    "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
 723                );
 724                return Err(DevContainerError::DevContainerParseFailed);
 725            }
 726            ConfigStatus::VariableParsed(dev_container) => dev_container,
 727        };
 728        let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
 729
 730        let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
 731
 732        mounts.append(&mut feature_mounts);
 733
 734        let privileged = dev_container.privileged.unwrap_or(false)
 735            || self.features.iter().any(|f| f.privileged());
 736
 737        let mut entrypoint_script_lines = vec![
 738            "echo Container started".to_string(),
 739            "trap \"exit 0\" 15".to_string(),
 740        ];
 741
 742        for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
 743            entrypoint_script_lines.push(entrypoint.clone());
 744        }
 745        entrypoint_script_lines.append(&mut vec![
 746            "exec \"$@\"".to_string(),
 747            "while sleep 1 & wait $!; do :; done".to_string(),
 748        ]);
 749
 750        Ok(DockerBuildResources {
 751            image: base_image,
 752            additional_mounts: mounts,
 753            privileged,
 754            entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
 755        })
 756    }
 757
 758    async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
 759        if let ConfigStatus::Deserialized(_) = &self.config {
 760            log::error!(
 761                "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
 762            );
 763            return Err(DevContainerError::DevContainerParseFailed);
 764        }
 765        let dev_container = self.dev_container();
 766        match dev_container.build_type() {
 767            DevContainerBuildType::Image(base_image) => {
 768                let built_docker_image = self.build_docker_image().await?;
 769
 770                let built_docker_image = self
 771                    .update_remote_user_uid(built_docker_image, &base_image)
 772                    .await?;
 773
 774                let resources = self.build_merged_resources(built_docker_image)?;
 775                Ok(DevContainerBuildResources::Docker(resources))
 776            }
 777            DevContainerBuildType::Dockerfile(_) => {
 778                let built_docker_image = self.build_docker_image().await?;
 779                let Some(features_build_info) = &self.features_build_info else {
 780                    log::error!(
 781                        "Can't attempt to build update UID dockerfile before initial docker build"
 782                    );
 783                    return Err(DevContainerError::DevContainerParseFailed);
 784                };
 785                let built_docker_image = self
 786                    .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
 787                    .await?;
 788
 789                let resources = self.build_merged_resources(built_docker_image)?;
 790                Ok(DevContainerBuildResources::Docker(resources))
 791            }
 792            DevContainerBuildType::DockerCompose => {
 793                log::debug!("Using docker compose. Building extended compose files");
 794                let docker_compose_resources = self.build_and_extend_compose_files().await?;
 795
 796                return Ok(DevContainerBuildResources::DockerCompose(
 797                    docker_compose_resources,
 798                ));
 799            }
 800            DevContainerBuildType::None => {
 801                return Err(DevContainerError::DevContainerParseFailed);
 802            }
 803        }
 804    }
 805
 806    async fn run_dev_container(
 807        &self,
 808        build_resources: DevContainerBuildResources,
 809    ) -> Result<DevContainerUp, DevContainerError> {
 810        let ConfigStatus::VariableParsed(_) = &self.config else {
 811            log::error!(
 812                "Variables have not been parsed; cannot proceed with running the dev container"
 813            );
 814            return Err(DevContainerError::DevContainerParseFailed);
 815        };
 816        let running_container = match build_resources {
 817            DevContainerBuildResources::DockerCompose(resources) => {
 818                self.run_docker_compose(resources).await?
 819            }
 820            DevContainerBuildResources::Docker(resources) => {
 821                self.run_docker_image(resources).await?
 822            }
 823        };
 824
 825        let remote_user = get_remote_user_from_config(&running_container, self)?;
 826        let remote_workspace_folder = self.remote_workspace_folder()?;
 827
 828        let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
 829
 830        Ok(DevContainerUp {
 831            container_id: running_container.id,
 832            remote_user,
 833            remote_workspace_folder: remote_workspace_folder.display().to_string(),
 834            extension_ids: self.extension_ids(),
 835            remote_env,
 836        })
 837    }
 838
 839    async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
 840        let dev_container = match &self.config {
 841            ConfigStatus::Deserialized(_) => {
 842                log::error!(
 843                    "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
 844                );
 845                return Err(DevContainerError::DevContainerParseFailed);
 846            }
 847            ConfigStatus::VariableParsed(dev_container) => dev_container,
 848        };
 849        let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
 850            return Err(DevContainerError::DevContainerParseFailed);
 851        };
 852        // Normalize upfront so every downstream consumer of
 853        // `DockerComposeResources.files` (compose fragment reads, project-name
 854        // derivation, `docker compose -f` invocations, …) sees resolved paths.
 855        // `dockerComposeFile` entries are joined verbatim with
 856        // `config_directory`, so raw entries can carry `..` components.
 857        let docker_compose_full_paths = docker_compose_files
 858            .iter()
 859            .map(|relative| normalize_path(&self.config_directory.join(relative)))
 860            .collect::<Vec<PathBuf>>();
 861
 862        let Some(config) = self
 863            .docker_client
 864            .get_docker_compose_config(&docker_compose_full_paths)
 865            .await?
 866        else {
 867            log::error!("Output could not deserialize into DockerComposeConfig");
 868            return Err(DevContainerError::DevContainerParseFailed);
 869        };
 870        Ok(DockerComposeResources {
 871            files: docker_compose_full_paths,
 872            config,
 873        })
 874    }
 875
 876    async fn build_and_extend_compose_files(
 877        &self,
 878    ) -> Result<DockerComposeResources, DevContainerError> {
 879        let dev_container = match &self.config {
 880            ConfigStatus::Deserialized(_) => {
 881                log::error!(
 882                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
 883                );
 884                return Err(DevContainerError::DevContainerParseFailed);
 885            }
 886            ConfigStatus::VariableParsed(dev_container) => dev_container,
 887        };
 888
 889        let Some(features_build_info) = &self.features_build_info else {
 890            log::error!(
 891                "Cannot build and extend compose files: features build info is not yet constructed"
 892            );
 893            return Err(DevContainerError::DevContainerParseFailed);
 894        };
 895        let mut docker_compose_resources = self.docker_compose_manifest().await?;
 896        let supports_buildkit = self.docker_client.supports_compose_buildkit();
 897
 898        let (main_service_name, main_service) =
 899            find_primary_service(&docker_compose_resources, self)?;
 900        let (built_service_image, built_service_image_tag) = if main_service
 901            .build
 902            .as_ref()
 903            .map(|b| b.dockerfile.as_ref())
 904            .is_some()
 905        {
 906            if !supports_buildkit {
 907                self.build_feature_content_image().await?;
 908            }
 909
 910            let dockerfile_path = &features_build_info.dockerfile_path;
 911
 912            let build_args = if !supports_buildkit {
 913                HashMap::from([
 914                    (
 915                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 916                        "dev_container_auto_added_stage_label".to_string(),
 917                    ),
 918                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 919                ])
 920            } else {
 921                HashMap::from([
 922                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
 923                    (
 924                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
 925                        "dev_container_auto_added_stage_label".to_string(),
 926                    ),
 927                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
 928                ])
 929            };
 930
 931            let additional_contexts = if !supports_buildkit {
 932                None
 933            } else {
 934                Some(HashMap::from([(
 935                    "dev_containers_feature_content_source".to_string(),
 936                    features_build_info
 937                        .features_content_dir
 938                        .display()
 939                        .to_string(),
 940                )]))
 941            };
 942
 943            let build_override = DockerComposeConfig {
 944                name: None,
 945                services: HashMap::from([(
 946                    main_service_name.clone(),
 947                    DockerComposeService {
 948                        image: Some(features_build_info.image_tag.clone()),
 949                        entrypoint: None,
 950                        cap_add: None,
 951                        security_opt: None,
 952                        labels: None,
 953                        build: Some(DockerComposeServiceBuild {
 954                            context: Some(
 955                                main_service
 956                                    .build
 957                                    .as_ref()
 958                                    .and_then(|b| b.context.clone())
 959                                    .unwrap_or_else(|| {
 960                                        features_build_info.empty_context_dir.display().to_string()
 961                                    }),
 962                            ),
 963                            dockerfile: Some(dockerfile_path.display().to_string()),
 964                            target: Some("dev_containers_target_stage".to_string()),
 965                            args: Some(build_args),
 966                            additional_contexts,
 967                        }),
 968                        volumes: Vec::new(),
 969                        ..Default::default()
 970                    },
 971                )]),
 972                volumes: HashMap::new(),
 973            };
 974
 975            let temp_base = std::env::temp_dir().join("devcontainer-zed");
 976            let config_location = temp_base.join("docker_compose_build.json");
 977
 978            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
 979                log::error!("Error serializing docker compose runtime override: {e}");
 980                DevContainerError::DevContainerParseFailed
 981            })?;
 982
 983            self.fs
 984                .write(&config_location, config_json.as_bytes())
 985                .await
 986                .map_err(|e| {
 987                    log::error!("Error writing the runtime override file: {e}");
 988                    DevContainerError::FilesystemError
 989                })?;
 990
 991            docker_compose_resources.files.push(config_location);
 992
 993            let project_name = self.project_name().await?;
 994            self.docker_client
 995                .docker_compose_build(&docker_compose_resources.files, &project_name)
 996                .await?;
 997            (
 998                self.docker_client
 999                    .inspect(&features_build_info.image_tag)
1000                    .await?,
1001                &features_build_info.image_tag,
1002            )
1003        } else if let Some(image) = &main_service.image {
1004            if dev_container
1005                .features
1006                .as_ref()
1007                .is_none_or(|features| features.is_empty())
1008            {
1009                (self.docker_client.inspect(image).await?, image)
1010            } else {
1011                if !supports_buildkit {
1012                    self.build_feature_content_image().await?;
1013                }
1014
1015                let dockerfile_path = &features_build_info.dockerfile_path;
1016
1017                let build_args = if !supports_buildkit {
1018                    HashMap::from([
1019                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
1020                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
1021                    ])
1022                } else {
1023                    HashMap::from([
1024                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
1025                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
1026                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
1027                    ])
1028                };
1029
1030                let additional_contexts = if !supports_buildkit {
1031                    None
1032                } else {
1033                    Some(HashMap::from([(
1034                        "dev_containers_feature_content_source".to_string(),
1035                        features_build_info
1036                            .features_content_dir
1037                            .display()
1038                            .to_string(),
1039                    )]))
1040                };
1041
1042                let build_override = DockerComposeConfig {
1043                    name: None,
1044                    services: HashMap::from([(
1045                        main_service_name.clone(),
1046                        DockerComposeService {
1047                            image: Some(features_build_info.image_tag.clone()),
1048                            entrypoint: None,
1049                            cap_add: None,
1050                            security_opt: None,
1051                            labels: None,
1052                            build: Some(DockerComposeServiceBuild {
1053                                context: Some(
1054                                    features_build_info.empty_context_dir.display().to_string(),
1055                                ),
1056                                dockerfile: Some(dockerfile_path.display().to_string()),
1057                                target: Some("dev_containers_target_stage".to_string()),
1058                                args: Some(build_args),
1059                                additional_contexts,
1060                            }),
1061                            volumes: Vec::new(),
1062                            ..Default::default()
1063                        },
1064                    )]),
1065                    volumes: HashMap::new(),
1066                };
1067
1068                let temp_base = std::env::temp_dir().join("devcontainer-zed");
1069                let config_location = temp_base.join("docker_compose_build.json");
1070
1071                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1072                    log::error!("Error serializing docker compose runtime override: {e}");
1073                    DevContainerError::DevContainerParseFailed
1074                })?;
1075
1076                self.fs
1077                    .write(&config_location, config_json.as_bytes())
1078                    .await
1079                    .map_err(|e| {
1080                        log::error!("Error writing the runtime override file: {e}");
1081                        DevContainerError::FilesystemError
1082                    })?;
1083
1084                docker_compose_resources.files.push(config_location);
1085
1086                let project_name = self.project_name().await?;
1087                self.docker_client
1088                    .docker_compose_build(&docker_compose_resources.files, &project_name)
1089                    .await?;
1090
1091                (
1092                    self.docker_client
1093                        .inspect(&features_build_info.image_tag)
1094                        .await?,
1095                    &features_build_info.image_tag,
1096                )
1097            }
1098        } else {
1099            log::error!("Docker compose must have either image or dockerfile defined");
1100            return Err(DevContainerError::DevContainerParseFailed);
1101        };
1102
1103        let built_service_image = self
1104            .update_remote_user_uid(built_service_image, built_service_image_tag)
1105            .await?;
1106
1107        let resources = self.build_merged_resources(built_service_image)?;
1108
1109        let network_mode = main_service.network_mode.as_ref();
1110        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1111        let runtime_override_file = self
1112            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1113            .await?;
1114
1115        docker_compose_resources.files.push(runtime_override_file);
1116
1117        Ok(docker_compose_resources)
1118    }
1119
1120    async fn write_runtime_override_file(
1121        &self,
1122        main_service_name: &str,
1123        network_mode_service: Option<&str>,
1124        resources: DockerBuildResources,
1125    ) -> Result<PathBuf, DevContainerError> {
1126        let config =
1127            self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1128        let temp_base = std::env::temp_dir().join("devcontainer-zed");
1129        let config_location = temp_base.join("docker_compose_runtime.json");
1130
1131        let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1132            log::error!("Error serializing docker compose runtime override: {e}");
1133            DevContainerError::DevContainerParseFailed
1134        })?;
1135
1136        self.fs
1137            .write(&config_location, config_json.as_bytes())
1138            .await
1139            .map_err(|e| {
1140                log::error!("Error writing the runtime override file: {e}");
1141                DevContainerError::FilesystemError
1142            })?;
1143
1144        Ok(config_location)
1145    }
1146
1147    fn build_runtime_override(
1148        &self,
1149        main_service_name: &str,
1150        network_mode_service: Option<&str>,
1151        resources: DockerBuildResources,
1152    ) -> Result<DockerComposeConfig, DevContainerError> {
1153        let mut runtime_labels = HashMap::new();
1154
1155        if let Some(metadata) = &resources.image.config.labels.metadata {
1156            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1157                log::error!("Error serializing docker image metadata: {e}");
1158                DevContainerError::ContainerNotValid(resources.image.id.clone())
1159            })?;
1160
1161            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1162        }
1163
1164        for (k, v) in self.identifying_labels() {
1165            runtime_labels.insert(k.to_string(), v.to_string());
1166        }
1167
1168        let config_volumes: HashMap<String, DockerComposeVolume> = resources
1169            .additional_mounts
1170            .iter()
1171            .filter_map(|mount| {
1172                if let Some(mount_type) = &mount.mount_type
1173                    && mount_type.to_lowercase() == "volume"
1174                    && let Some(source) = &mount.source
1175                {
1176                    Some((
1177                        source.clone(),
1178                        DockerComposeVolume {
1179                            name: source.clone(),
1180                        },
1181                    ))
1182                } else {
1183                    None
1184                }
1185            })
1186            .collect();
1187
1188        let volumes: Vec<MountDefinition> = resources
1189            .additional_mounts
1190            .iter()
1191            .map(|v| MountDefinition {
1192                source: v.source.clone(),
1193                target: v.target.clone(),
1194                mount_type: v.mount_type.clone(),
1195            })
1196            .collect();
1197
1198        let mut main_service = DockerComposeService {
1199            entrypoint: Some(vec![
1200                "/bin/sh".to_string(),
1201                "-c".to_string(),
1202                resources.entrypoint_script,
1203                "-".to_string(),
1204            ]),
1205            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1206            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1207            labels: Some(runtime_labels),
1208            volumes,
1209            privileged: Some(resources.privileged),
1210            ..Default::default()
1211        };
1212        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1213        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1214        if let Some(forward_ports) = &self.dev_container().forward_ports {
1215            let main_service_ports: Vec<String> = forward_ports
1216                .iter()
1217                .filter_map(|f| match f {
1218                    ForwardPort::Number(port) => Some(port.to_string()),
1219                    ForwardPort::String(port) => {
1220                        let parts: Vec<&str> = port.split(":").collect();
1221                        if parts.len() <= 1 {
1222                            Some(port.to_string())
1223                        } else if parts.len() == 2 {
1224                            if parts[0] == main_service_name {
1225                                Some(parts[1].to_string())
1226                            } else {
1227                                None
1228                            }
1229                        } else {
1230                            None
1231                        }
1232                    }
1233                })
1234                .collect();
1235            for port in main_service_ports {
1236                // If the main service uses a different service's network bridge, append to that service's ports instead
1237                if let Some(network_service_name) = network_mode_service {
1238                    if let Some(service) = service_declarations.get_mut(network_service_name) {
1239                        service.ports.push(DockerComposeServicePort {
1240                            target: port.clone(),
1241                            published: port.clone(),
1242                            ..Default::default()
1243                        });
1244                    } else {
1245                        service_declarations.insert(
1246                            network_service_name.to_string(),
1247                            DockerComposeService {
1248                                ports: vec![DockerComposeServicePort {
1249                                    target: port.clone(),
1250                                    published: port.clone(),
1251                                    ..Default::default()
1252                                }],
1253                                ..Default::default()
1254                            },
1255                        );
1256                    }
1257                } else {
1258                    main_service.ports.push(DockerComposeServicePort {
1259                        target: port.clone(),
1260                        published: port.clone(),
1261                        ..Default::default()
1262                    });
1263                }
1264            }
1265            let other_service_ports: Vec<(&str, &str)> = forward_ports
1266                .iter()
1267                .filter_map(|f| match f {
1268                    ForwardPort::Number(_) => None,
1269                    ForwardPort::String(port) => {
1270                        let parts: Vec<&str> = port.split(":").collect();
1271                        if parts.len() != 2 {
1272                            None
1273                        } else {
1274                            if parts[0] == main_service_name {
1275                                None
1276                            } else {
1277                                Some((parts[0], parts[1]))
1278                            }
1279                        }
1280                    }
1281                })
1282                .collect();
1283            for (service_name, port) in other_service_ports {
1284                if let Some(service) = service_declarations.get_mut(service_name) {
1285                    service.ports.push(DockerComposeServicePort {
1286                        target: port.to_string(),
1287                        published: port.to_string(),
1288                        ..Default::default()
1289                    });
1290                } else {
1291                    service_declarations.insert(
1292                        service_name.to_string(),
1293                        DockerComposeService {
1294                            ports: vec![DockerComposeServicePort {
1295                                target: port.to_string(),
1296                                published: port.to_string(),
1297                                ..Default::default()
1298                            }],
1299                            ..Default::default()
1300                        },
1301                    );
1302                }
1303            }
1304        }
1305
1306        service_declarations.insert(main_service_name.to_string(), main_service);
1307        let new_docker_compose_config = DockerComposeConfig {
1308            name: None,
1309            services: service_declarations,
1310            volumes: config_volumes,
1311        };
1312
1313        Ok(new_docker_compose_config)
1314    }
1315
1316    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1317        let dev_container = match &self.config {
1318            ConfigStatus::Deserialized(_) => {
1319                log::error!(
1320                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1321                );
1322                return Err(DevContainerError::DevContainerParseFailed);
1323            }
1324            ConfigStatus::VariableParsed(dev_container) => dev_container,
1325        };
1326
1327        match dev_container.build_type() {
1328            DevContainerBuildType::Image(image_tag) => {
1329                let base_image = self.docker_client.inspect(&image_tag).await?;
1330                if dev_container
1331                    .features
1332                    .as_ref()
1333                    .is_none_or(|features| features.is_empty())
1334                {
1335                    log::debug!("No features to add. Using base image");
1336                    return Ok(base_image);
1337                }
1338            }
1339            DevContainerBuildType::Dockerfile(_) => {}
1340            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1341                return Err(DevContainerError::DevContainerParseFailed);
1342            }
1343        };
1344
1345        let mut command = self.create_docker_build()?;
1346
1347        let output = self
1348            .command_runner
1349            .run_command(&mut command)
1350            .await
1351            .map_err(|e| {
1352                log::error!("Error building docker image: {e}");
1353                DevContainerError::CommandFailed(command.get_program().display().to_string())
1354            })?;
1355
1356        if !output.status.success() {
1357            let stderr = String::from_utf8_lossy(&output.stderr);
1358            log::error!("docker buildx build failed: {stderr}");
1359            return Err(DevContainerError::CommandFailed(
1360                command.get_program().display().to_string(),
1361            ));
1362        }
1363
1364        // After a successful build, inspect the newly tagged image to get its metadata
1365        let Some(features_build_info) = &self.features_build_info else {
1366            log::error!("Features build info expected, but not created");
1367            return Err(DevContainerError::DevContainerParseFailed);
1368        };
1369        let image = self
1370            .docker_client
1371            .inspect(&features_build_info.image_tag)
1372            .await?;
1373
1374        Ok(image)
1375    }
1376
    /// Windows no-op counterpart of the unix `update_remote_user_uid`: the
    /// unix version rebuilds the image so the remote user's UID/GID matches
    /// the host's `id -u`/`id -g`, which has no Windows equivalent. The image
    /// is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1385    #[cfg(not(target_os = "windows"))]
1386    async fn update_remote_user_uid(
1387        &self,
1388        image: DockerInspect,
1389        base_image: &str,
1390    ) -> Result<DockerInspect, DevContainerError> {
1391        let dev_container = self.dev_container();
1392
1393        let Some(features_build_info) = &self.features_build_info else {
1394            return Ok(image);
1395        };
1396
1397        // updateRemoteUserUID defaults to true per the devcontainers spec
1398        if dev_container.update_remote_user_uid == Some(false) {
1399            return Ok(image);
1400        }
1401
1402        let remote_user = get_remote_user_from_config(&image, self)?;
1403        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1404            return Ok(image);
1405        }
1406
1407        let image_user = image
1408            .config
1409            .image_user
1410            .as_deref()
1411            .unwrap_or("root")
1412            .to_string();
1413
1414        let host_uid = Command::new("id")
1415            .arg("-u")
1416            .output()
1417            .await
1418            .map_err(|e| {
1419                log::error!("Failed to get host UID: {e}");
1420                DevContainerError::CommandFailed("id -u".to_string())
1421            })
1422            .and_then(|output| {
1423                String::from_utf8_lossy(&output.stdout)
1424                    .trim()
1425                    .parse::<u32>()
1426                    .map_err(|e| {
1427                        log::error!("Failed to parse host UID: {e}");
1428                        DevContainerError::CommandFailed("id -u".to_string())
1429                    })
1430            })?;
1431
1432        let host_gid = Command::new("id")
1433            .arg("-g")
1434            .output()
1435            .await
1436            .map_err(|e| {
1437                log::error!("Failed to get host GID: {e}");
1438                DevContainerError::CommandFailed("id -g".to_string())
1439            })
1440            .and_then(|output| {
1441                String::from_utf8_lossy(&output.stdout)
1442                    .trim()
1443                    .parse::<u32>()
1444                    .map_err(|e| {
1445                        log::error!("Failed to parse host GID: {e}");
1446                        DevContainerError::CommandFailed("id -g".to_string())
1447                    })
1448            })?;
1449
1450        let dockerfile_content = self.generate_update_uid_dockerfile();
1451
1452        let dockerfile_path = features_build_info
1453            .features_content_dir
1454            .join("updateUID.Dockerfile");
1455        self.fs
1456            .write(&dockerfile_path, dockerfile_content.as_bytes())
1457            .await
1458            .map_err(|e| {
1459                log::error!("Failed to write updateUID Dockerfile: {e}");
1460                DevContainerError::FilesystemError
1461            })?;
1462
1463        let updated_image_tag = features_build_info.image_tag.clone();
1464
1465        let mut command = Command::new(self.docker_client.docker_cli());
1466        command.args(["build"]);
1467        command.args(["-f", &dockerfile_path.display().to_string()]);
1468        command.args(["-t", &updated_image_tag]);
1469        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1470        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1471        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1472        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1473        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1474        command.arg(features_build_info.empty_context_dir.display().to_string());
1475
1476        let output = self
1477            .command_runner
1478            .run_command(&mut command)
1479            .await
1480            .map_err(|e| {
1481                log::error!("Error building UID update image: {e}");
1482                DevContainerError::CommandFailed(command.get_program().display().to_string())
1483            })?;
1484
1485        if !output.status.success() {
1486            let stderr = String::from_utf8_lossy(&output.stderr);
1487            log::error!("UID update build failed: {stderr}");
1488            return Err(DevContainerError::CommandFailed(
1489                command.get_program().display().to_string(),
1490            ));
1491        }
1492
1493        self.docker_client.inspect(&updated_image_tag).await
1494    }
1495
1496    #[cfg(not(target_os = "windows"))]
1497    fn generate_update_uid_dockerfile(&self) -> String {
1498        let mut dockerfile = r#"ARG BASE_IMAGE
1499FROM $BASE_IMAGE
1500
1501USER root
1502
1503ARG REMOTE_USER
1504ARG NEW_UID
1505ARG NEW_GID
1506SHELL ["/bin/sh", "-c"]
1507RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1508	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1509	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1510	if [ -z "$OLD_UID" ]; then \
1511		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1512	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1513		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1514	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1515		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1516	else \
1517		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1518			FREE_GID=65532; \
1519			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1520			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1521			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1522		fi; \
1523		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1524		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1525		if [ "$OLD_GID" != "$NEW_GID" ]; then \
1526			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1527		fi; \
1528		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1529	fi;
1530
1531ARG IMAGE_USER
1532USER $IMAGE_USER
1533
1534# Ensure that /etc/profile does not clobber the existing path
1535RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1536"#.to_string();
1537        for feature in &self.features {
1538            let container_env_layer = feature.generate_dockerfile_env();
1539            dockerfile = format!("{dockerfile}\n{container_env_layer}");
1540        }
1541
1542        if let Some(env) = &self.dev_container().container_env {
1543            for (key, value) in env {
1544                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1545            }
1546        }
1547        dockerfile
1548    }
1549
1550    async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1551        let Some(features_build_info) = &self.features_build_info else {
1552            log::error!("Features build info not available for building feature content image");
1553            return Err(DevContainerError::DevContainerParseFailed);
1554        };
1555        let features_content_dir = &features_build_info.features_content_dir;
1556
1557        let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1558        let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1559
1560        self.fs
1561            .write(&dockerfile_path, dockerfile_content.as_bytes())
1562            .await
1563            .map_err(|e| {
1564                log::error!("Failed to write feature content Dockerfile: {e}");
1565                DevContainerError::FilesystemError
1566            })?;
1567
1568        let mut command = Command::new(self.docker_client.docker_cli());
1569        command.args([
1570            "build",
1571            "-t",
1572            "dev_container_feature_content_temp",
1573            "-f",
1574            &dockerfile_path.display().to_string(),
1575            &features_content_dir.display().to_string(),
1576        ]);
1577
1578        let output = self
1579            .command_runner
1580            .run_command(&mut command)
1581            .await
1582            .map_err(|e| {
1583                log::error!("Error building feature content image: {e}");
1584                DevContainerError::CommandFailed(self.docker_client.docker_cli())
1585            })?;
1586
1587        if !output.status.success() {
1588            let stderr = String::from_utf8_lossy(&output.stderr);
1589            log::error!("Feature content image build failed: {stderr}");
1590            return Err(DevContainerError::CommandFailed(
1591                self.docker_client.docker_cli(),
1592            ));
1593        }
1594
1595        Ok(())
1596    }
1597
1598    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1599        let dev_container = match &self.config {
1600            ConfigStatus::Deserialized(_) => {
1601                log::error!(
1602                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1603                );
1604                return Err(DevContainerError::DevContainerParseFailed);
1605            }
1606            ConfigStatus::VariableParsed(dev_container) => dev_container,
1607        };
1608
1609        let Some(features_build_info) = &self.features_build_info else {
1610            log::error!(
1611                "Cannot create docker build command; features build info has not been constructed"
1612            );
1613            return Err(DevContainerError::DevContainerParseFailed);
1614        };
1615        let mut command = Command::new(self.docker_client.docker_cli());
1616
1617        command.args(["buildx", "build"]);
1618
1619        // --load is short for --output=docker, loading the built image into the local docker images
1620        command.arg("--load");
1621
1622        // BuildKit build context: provides the features content directory as a named context
1623        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1624        command.args([
1625            "--build-context",
1626            &format!(
1627                "dev_containers_feature_content_source={}",
1628                features_build_info.features_content_dir.display()
1629            ),
1630        ]);
1631
1632        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1633        if let Some(build_image) = &features_build_info.build_image {
1634            command.args([
1635                "--build-arg",
1636                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1637            ]);
1638        } else {
1639            command.args([
1640                "--build-arg",
1641                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1642            ]);
1643        }
1644
1645        command.args([
1646            "--build-arg",
1647            &format!(
1648                "_DEV_CONTAINERS_IMAGE_USER={}",
1649                self.root_image
1650                    .as_ref()
1651                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
1652                    .unwrap_or(&"root".to_string())
1653            ),
1654        ]);
1655
1656        command.args([
1657            "--build-arg",
1658            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1659        ]);
1660
1661        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1662            for (key, value) in args {
1663                command.args(["--build-arg", &format!("{}={}", key, value)]);
1664            }
1665        }
1666
1667        if let Some(options) = dev_container
1668            .build
1669            .as_ref()
1670            .and_then(|b| b.options.as_ref())
1671        {
1672            for option in options {
1673                command.arg(option);
1674            }
1675        }
1676
1677        if let Some(cache_from_images) = dev_container
1678            .build
1679            .as_ref()
1680            .and_then(|b| b.cache_from.as_ref())
1681        {
1682            for cache_from_image in cache_from_images {
1683                command.args(["--cache-from", cache_from_image]);
1684            }
1685        }
1686
1687        command.args(["--target", "dev_containers_target_stage"]);
1688
1689        command.args([
1690            "-f",
1691            &features_build_info.dockerfile_path.display().to_string(),
1692        ]);
1693
1694        command.args(["-t", &features_build_info.image_tag]);
1695
1696        if let DevContainerBuildType::Dockerfile(build) = dev_container.build_type() {
1697            command.arg(self.calculate_context_dir(build).display().to_string());
1698        } else {
1699            // Use an empty folder as the build context to avoid pulling in unneeded files.
1700            // The actual feature content is supplied via the BuildKit build context above.
1701            command.arg(features_build_info.empty_context_dir.display().to_string());
1702        }
1703
1704        Ok(command)
1705    }
1706
1707    async fn run_docker_compose(
1708        &self,
1709        resources: DockerComposeResources,
1710    ) -> Result<DockerInspect, DevContainerError> {
1711        let mut command = Command::new(self.docker_client.docker_cli());
1712        let project_name = self.project_name().await?;
1713        command.args(&["compose", "--project-name", &project_name]);
1714        for docker_compose_file in resources.files {
1715            command.args(&["-f", &docker_compose_file.display().to_string()]);
1716        }
1717        command.args(&["up", "-d"]);
1718
1719        let output = self
1720            .command_runner
1721            .run_command(&mut command)
1722            .await
1723            .map_err(|e| {
1724                log::error!("Error running docker compose up: {e}");
1725                DevContainerError::CommandFailed(command.get_program().display().to_string())
1726            })?;
1727
1728        if !output.status.success() {
1729            let stderr = String::from_utf8_lossy(&output.stderr);
1730            log::error!("Non-success status from docker compose up: {}", stderr);
1731            return Err(DevContainerError::CommandFailed(
1732                command.get_program().display().to_string(),
1733            ));
1734        }
1735
1736        if let Some(docker_ps) = self.check_for_existing_container().await? {
1737            log::debug!("Found newly created dev container");
1738            return self.docker_client.inspect(&docker_ps.id).await;
1739        }
1740
1741        log::error!("Could not find existing container after docker compose up");
1742
1743        Err(DevContainerError::DevContainerParseFailed)
1744    }
1745
1746    async fn run_docker_image(
1747        &self,
1748        build_resources: DockerBuildResources,
1749    ) -> Result<DockerInspect, DevContainerError> {
1750        let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1751
1752        let output = self
1753            .command_runner
1754            .run_command(&mut docker_run_command)
1755            .await
1756            .map_err(|e| {
1757                log::error!("Error running docker run: {e}");
1758                DevContainerError::CommandFailed(
1759                    docker_run_command.get_program().display().to_string(),
1760                )
1761            })?;
1762
1763        if !output.status.success() {
1764            let std_err = String::from_utf8_lossy(&output.stderr);
1765            log::error!("Non-success status from docker run. StdErr: {std_err}");
1766            return Err(DevContainerError::CommandFailed(
1767                docker_run_command.get_program().display().to_string(),
1768            ));
1769        }
1770
1771        log::debug!("Checking for container that was started");
1772        let Some(docker_ps) = self.check_for_existing_container().await? else {
1773            log::error!("Could not locate container just created");
1774            return Err(DevContainerError::DevContainerParseFailed);
1775        };
1776        self.docker_client.inspect(&docker_ps.id).await
1777    }
1778
1779    fn local_workspace_folder(&self) -> String {
1780        self.local_project_directory.display().to_string()
1781    }
1782    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1783        self.local_project_directory
1784            .file_name()
1785            .map(|f| f.display().to_string())
1786            .ok_or(DevContainerError::DevContainerParseFailed)
1787    }
1788
1789    fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1790        self.dev_container()
1791            .workspace_folder
1792            .as_ref()
1793            .map(|folder| PathBuf::from(folder))
1794            .or(Some(
1795                // We explicitly use "/" here, instead of PathBuf::join
1796                // because we want remote targets to use unix-style filepaths,
1797                // even on a Windows host
1798                PathBuf::from(format!(
1799                    "{}/{}",
1800                    DEFAULT_REMOTE_PROJECT_DIR,
1801                    self.local_workspace_base_name()?
1802                )),
1803            ))
1804            .ok_or(DevContainerError::DevContainerParseFailed)
1805    }
1806    fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1807        self.remote_workspace_folder().and_then(|f| {
1808            f.file_name()
1809                .map(|file_name| file_name.display().to_string())
1810                .ok_or(DevContainerError::DevContainerParseFailed)
1811        })
1812    }
1813
1814    fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1815        if let Some(mount) = &self.dev_container().workspace_mount {
1816            return Ok(mount.clone());
1817        }
1818        let Some(project_directory_name) = self.local_project_directory.file_name() else {
1819            return Err(DevContainerError::DevContainerParseFailed);
1820        };
1821
1822        Ok(MountDefinition {
1823            source: Some(self.local_workspace_folder()),
1824            // We explicitly use "/" here, instead of PathBuf::join
1825            // because we want the remote target to use unix-style filepaths,
1826            // even on a Windows host
1827            target: format!(
1828                "{}/{}",
1829                PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1830                project_directory_name.display()
1831            ),
1832            mount_type: None,
1833        })
1834    }
1835
    /// Builds the `docker run` command that starts the dev container from a
    /// built image: workspace + extra mounts, identifying labels, port
    /// publishing, and a `/bin/sh -c` entrypoint running the generated
    /// entrypoint script.
    ///
    /// User-supplied `runArgs` are passed through verbatim; our defaults
    /// (podman `--security-opt`/`--userns`, `--sig-proxy=false`) are added
    /// only when no user arg already starts with that flag's prefix.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Empty default so the prefix scan below always has a slice to check.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        // User runArgs first, verbatim, so they appear before our defaults.
        for arg in run_args {
            command.arg(arg);
        }

        // Appends `arg` only when no user runArg starts with `arg_name`,
        // letting explicit user flags suppress our defaults.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        // Podman defaults: disable SELinux labeling on mounts and keep the
        // host uid/gid mapping inside the user namespace.
        if &docker_cli == "podman" {
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        // Detached container; workspace mount plus any additional mounts.
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to re-discover this container
        // (see `check_for_existing_container`).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata as a container label.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Numeric forwardPorts entries are published host:container;
        // non-numeric variants are intentionally skipped here.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Override the entrypoint so the container runs our entrypoint script
        // via `/bin/sh -c <script> -` (the trailing "-" becomes the script's $0).
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1930
1931    fn extension_ids(&self) -> Vec<String> {
1932        self.dev_container()
1933            .customizations
1934            .as_ref()
1935            .map(|c| c.zed.extensions.clone())
1936            .unwrap_or_default()
1937    }
1938
1939    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1940        self.dev_container().validate_devcontainer_contents()?;
1941
1942        self.run_initialize_commands().await?;
1943
1944        self.download_feature_and_dockerfile_resources().await?;
1945
1946        let build_resources = self.build_resources().await?;
1947
1948        let devcontainer_up = self.run_dev_container(build_resources).await?;
1949
1950        self.run_remote_scripts(&devcontainer_up, true).await?;
1951
1952        Ok(devcontainer_up)
1953    }
1954
1955    async fn run_remote_scripts(
1956        &self,
1957        devcontainer_up: &DevContainerUp,
1958        new_container: bool,
1959    ) -> Result<(), DevContainerError> {
1960        let ConfigStatus::VariableParsed(config) = &self.config else {
1961            log::error!("Config not yet parsed, cannot proceed with remote scripts");
1962            return Err(DevContainerError::DevContainerScriptsFailed);
1963        };
1964        let remote_folder = self.remote_workspace_folder()?.display().to_string();
1965
1966        if new_container {
1967            if let Some(on_create_command) = &config.on_create_command {
1968                for (command_name, command) in on_create_command.script_commands() {
1969                    log::debug!("Running on create command {command_name}");
1970                    self.docker_client
1971                        .run_docker_exec(
1972                            &devcontainer_up.container_id,
1973                            &remote_folder,
1974                            &devcontainer_up.remote_user,
1975                            &devcontainer_up.remote_env,
1976                            command,
1977                        )
1978                        .await?;
1979                }
1980            }
1981            if let Some(update_content_command) = &config.update_content_command {
1982                for (command_name, command) in update_content_command.script_commands() {
1983                    log::debug!("Running update content command {command_name}");
1984                    self.docker_client
1985                        .run_docker_exec(
1986                            &devcontainer_up.container_id,
1987                            &remote_folder,
1988                            &devcontainer_up.remote_user,
1989                            &devcontainer_up.remote_env,
1990                            command,
1991                        )
1992                        .await?;
1993                }
1994            }
1995
1996            if let Some(post_create_command) = &config.post_create_command {
1997                for (command_name, command) in post_create_command.script_commands() {
1998                    log::debug!("Running post create command {command_name}");
1999                    self.docker_client
2000                        .run_docker_exec(
2001                            &devcontainer_up.container_id,
2002                            &remote_folder,
2003                            &devcontainer_up.remote_user,
2004                            &devcontainer_up.remote_env,
2005                            command,
2006                        )
2007                        .await?;
2008                }
2009            }
2010            if let Some(post_start_command) = &config.post_start_command {
2011                for (command_name, command) in post_start_command.script_commands() {
2012                    log::debug!("Running post start command {command_name}");
2013                    self.docker_client
2014                        .run_docker_exec(
2015                            &devcontainer_up.container_id,
2016                            &remote_folder,
2017                            &devcontainer_up.remote_user,
2018                            &devcontainer_up.remote_env,
2019                            command,
2020                        )
2021                        .await?;
2022                }
2023            }
2024        }
2025        if let Some(post_attach_command) = &config.post_attach_command {
2026            for (command_name, command) in post_attach_command.script_commands() {
2027                log::debug!("Running post attach command {command_name}");
2028                self.docker_client
2029                    .run_docker_exec(
2030                        &devcontainer_up.container_id,
2031                        &remote_folder,
2032                        &devcontainer_up.remote_user,
2033                        &devcontainer_up.remote_env,
2034                        command,
2035                    )
2036                    .await?;
2037            }
2038        }
2039
2040        Ok(())
2041    }
2042
2043    async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
2044        let ConfigStatus::VariableParsed(config) = &self.config else {
2045            log::error!("Config not yet parsed, cannot proceed with initializeCommand");
2046            return Err(DevContainerError::DevContainerParseFailed);
2047        };
2048
2049        if let Some(initialize_command) = &config.initialize_command {
2050            log::debug!("Running initialize command");
2051            initialize_command
2052                .run(&self.command_runner, &self.local_project_directory)
2053                .await
2054        } else {
2055            log::warn!("No initialize command found");
2056            Ok(())
2057        }
2058    }
2059
2060    async fn check_for_existing_devcontainer(
2061        &self,
2062    ) -> Result<Option<DevContainerUp>, DevContainerError> {
2063        if let Some(docker_ps) = self.check_for_existing_container().await? {
2064            log::debug!("Dev container already found. Proceeding with it");
2065
2066            let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
2067
2068            if !docker_inspect.is_running() {
2069                log::debug!("Container not running. Will attempt to start, and then proceed");
2070                self.docker_client.start_container(&docker_ps.id).await?;
2071            }
2072
2073            let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
2074
2075            let remote_folder = self.remote_workspace_folder()?;
2076
2077            let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
2078
2079            let dev_container_up = DevContainerUp {
2080                container_id: docker_ps.id,
2081                remote_user: remote_user,
2082                remote_workspace_folder: remote_folder.display().to_string(),
2083                extension_ids: self.extension_ids(),
2084                remote_env,
2085            };
2086
2087            self.run_remote_scripts(&dev_container_up, false).await?;
2088
2089            Ok(Some(dev_container_up))
2090        } else {
2091            log::debug!("Existing container not found.");
2092
2093            Ok(None)
2094        }
2095    }
2096
2097    async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2098        self.docker_client
2099            .find_process_by_filters(
2100                self.identifying_labels()
2101                    .iter()
2102                    .map(|(k, v)| format!("label={k}={v}"))
2103                    .collect(),
2104            )
2105            .await
2106    }
2107
2108    /// Matches `@devcontainers/cli`'s `getProjectName` in
2109    /// `src/spec-node/dockerCompose.ts`. See `derive_project_name` for the
2110    /// full precedence. Using the devcontainer.json `name` field here
2111    /// diverges from the reference CLI and creates duplicate compose
2112    /// projects when the same folder is opened by both tools — see #54255.
2113    ///
2114    /// Async because the derivation reads both the workspace `.env` file
2115    /// and the merged compose config — neither of which is available
2116    /// synchronously.
2117    async fn project_name(&self) -> Result<String, DevContainerError> {
2118        let workspace_fallback = self
2119            .local_workspace_base_name()
2120            .unwrap_or_else(|_| self.local_workspace_folder());
2121        let compose_resources = self.docker_compose_manifest().await.ok();
2122        let first_compose_file = compose_resources
2123            .as_ref()
2124            .and_then(|r| r.files.first())
2125            .map(PathBuf::as_path);
2126        let compose_config_name = compose_resources
2127            .as_ref()
2128            .and_then(|r| r.config.name.as_deref());
2129        let mut compose_name_explicitly_declared = false;
2130        if let Some(resources) = &compose_resources {
2131            for file in &resources.files {
2132                // Mirrors the CLI's fragment re-parse (dockerCompose.ts 663-673):
2133                // the whole readFile+yaml.load pair is wrapped in a single
2134                // try/catch that swallows every failure. The comment there
2135                // calls out `!reset` custom tags; the behavior is "on any
2136                // failure, treat the fragment as not-declared and keep
2137                // scanning." Propagating an I/O error here would diverge
2138                // from that policy and fail the whole devcontainer flow for
2139                // a fragment the CLI would have silently skipped.
2140                let contents = match self.fs.load(file).await {
2141                    Ok(contents) => contents,
2142                    Err(err) => {
2143                        log::warn!(
2144                            "Ignoring unreadable compose fragment `{}` while deriving project name: {err:?}",
2145                            file.display()
2146                        );
2147                        continue;
2148                    }
2149                };
2150                if compose_fragment_declares_name(&contents) {
2151                    compose_name_explicitly_declared = true;
2152                    break;
2153                }
2154            }
2155        }
2156        let dotenv_path = self.local_project_directory.join(".env");
2157        let dotenv_contents = match self.fs.load(&dotenv_path).await {
2158            Ok(contents) => Some(contents),
2159            Err(err) if is_missing_file_error(&err) => None,
2160            Err(err) => {
2161                // Mirrors the CLI: `getProjectName` only swallows `ENOENT`/
2162                // `EISDIR` on the `.env` read. Any other error (permission
2163                // denied, I/O failure, …) must surface so we don't silently
2164                // fall back to a non-canonical project name and create a
2165                // second compose project for the same repo.
2166                log::error!(
2167                    "Failed to read workspace .env `{}` while deriving project name: {err:?}",
2168                    dotenv_path.display()
2169                );
2170                return Err(DevContainerError::FilesystemError);
2171            }
2172        };
2173        Ok(derive_project_name(
2174            &self.local_environment,
2175            dotenv_contents.as_deref(),
2176            compose_config_name,
2177            compose_name_explicitly_declared,
2178            first_compose_file,
2179            &self.local_project_directory,
2180            &workspace_fallback,
2181        ))
2182    }
2183
    /// Loads the config's Dockerfile and textually expands `${KEY}`
    /// references using (1) build args from the devcontainer config (compose
    /// service args for compose configs) and (2) defaults from earlier
    /// `ARG key=value` lines in the Dockerfile itself.
    ///
    /// NOTE(review): this is a textual approximation of Docker's ARG
    /// handling — `$KEY` without braces is not substituted, and an ARG's
    /// default is only applied to lines *after* its declaration. Confirm
    /// callers only rely on `${...}`-style expansion.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        // Image-type configs have no Dockerfile to expand.
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // For docker-compose configs the build args live on the primary
        // compose service rather than on dev_container.build.
        let devcontainer_args = match self.dev_container().build_type() {
            DevContainerBuildType::DockerCompose => {
                let compose = self.docker_compose_manifest().await?;
                find_primary_service(&compose, self)?
                    .1
                    .build
                    .and_then(|b| b.args)
                    .unwrap_or_default()
            }
            _ => self
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.args.clone())
                .unwrap_or_default(),
        };
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // ARG defaults collected while scanning, in declaration order.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches each `key=` token within an ARG directive (a single ARG
        // line may declare several `key=value` pairs).
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            // Then fall back to defaults captured from earlier ARG lines.
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // The value spans from the end of this `key=` token up to
                    // the start of the next `key=` token (or end of line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip exactly one pair of surrounding double quotes.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2254
2255    fn calculate_context_dir(&self, build: ContainerBuild) -> PathBuf {
2256        let Some(context) = build.context else {
2257            return self.config_directory.clone();
2258        };
2259        let context_path = PathBuf::from(context);
2260
2261        if context_path.is_absolute() {
2262            context_path
2263        } else {
2264            self.config_directory.join(context_path)
2265        }
2266    }
2267}
2268
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
///
/// Stored on `DevContainerManifest::features_build_info` once computed.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// NOTE(review): semantics of `None` aren't visible here — confirm against producers.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2287
2288pub(crate) async fn read_devcontainer_configuration(
2289    config: DevContainerConfig,
2290    context: &DevContainerContext,
2291    environment: HashMap<String, String>,
2292) -> Result<DevContainer, DevContainerError> {
2293    let docker = if context.use_podman {
2294        Docker::new("podman").await
2295    } else {
2296        Docker::new("docker").await
2297    };
2298    let mut dev_container = DevContainerManifest::new(
2299        context,
2300        environment,
2301        Arc::new(docker),
2302        Arc::new(DefaultCommandRunner::new()),
2303        config,
2304        &context.project_directory.as_ref(),
2305    )
2306    .await?;
2307    dev_container.parse_nonremote_vars()?;
2308    Ok(dev_container.dev_container().clone())
2309}
2310
2311pub(crate) async fn spawn_dev_container(
2312    context: &DevContainerContext,
2313    environment: HashMap<String, String>,
2314    config: DevContainerConfig,
2315    local_project_path: &Path,
2316) -> Result<DevContainerUp, DevContainerError> {
2317    let docker = if context.use_podman {
2318        Docker::new("podman").await
2319    } else {
2320        Docker::new("docker").await
2321    };
2322    let mut devcontainer_manifest = DevContainerManifest::new(
2323        context,
2324        environment,
2325        Arc::new(docker),
2326        Arc::new(DefaultCommandRunner::new()),
2327        config,
2328        local_project_path,
2329    )
2330    .await?;
2331
2332    devcontainer_manifest.parse_nonremote_vars()?;
2333
2334    log::debug!("Checking for existing container");
2335    if let Some(devcontainer) = devcontainer_manifest
2336        .check_for_existing_devcontainer()
2337        .await?
2338    {
2339        Ok(devcontainer)
2340    } else {
2341        log::debug!("Existing container not found. Building");
2342
2343        devcontainer_manifest.build_and_run().await
2344    }
2345}
2346
/// Inputs gathered for running a single-container (non-compose) dev
/// container once its image is available.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be created from.
    image: DockerInspect,
    /// Mounts to apply in addition to those implied elsewhere —
    /// NOTE(review): exact relationship to workspace mounts isn't visible here.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should run privileged — presumably maps to a
    /// `--privileged` flag; confirm at the call site.
    privileged: bool,
    /// Script content used as the container's entrypoint.
    entrypoint_script: String,
}
2354
/// Build outputs, split by whether the config is docker-compose based or a
/// plain single-container (image/Dockerfile) config.
#[derive(Debug)]
enum DevContainerBuildResources {
    /// Compose files plus the merged compose configuration.
    DockerCompose(DockerComposeResources),
    /// Image and run options for a single-container config.
    Docker(DockerBuildResources),
}
2360
2361fn find_primary_service(
2362    docker_compose: &DockerComposeResources,
2363    devcontainer: &DevContainerManifest,
2364) -> Result<(String, DockerComposeService), DevContainerError> {
2365    let Some(service_name) = &devcontainer.dev_container().service else {
2366        return Err(DevContainerError::DevContainerParseFailed);
2367    };
2368
2369    match docker_compose.config.services.get(service_name) {
2370        Some(service) => Ok((service_name.clone(), service.clone())),
2371        None => Err(DevContainerError::DevContainerParseFailed),
2372    }
2373}
2374
2375/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2376/// `dockerfile` is relative to the build `context`, and `context` is relative to
2377/// the compose file's directory.
2378fn resolve_compose_dockerfile(
2379    compose_file: &Path,
2380    context: Option<&str>,
2381    dockerfile: &str,
2382) -> Option<PathBuf> {
2383    let dockerfile = PathBuf::from(dockerfile);
2384    if dockerfile.is_absolute() {
2385        return Some(dockerfile);
2386    }
2387    let compose_dir = compose_file.parent()?;
2388    let context_dir = match context {
2389        Some(ctx) => {
2390            let ctx = PathBuf::from(ctx);
2391            if ctx.is_absolute() {
2392                ctx
2393            } else {
2394                normalize_path(&compose_dir.join(ctx))
2395            }
2396        }
2397        None => compose_dir.to_path_buf(),
2398    };
2399    Some(context_dir.join(dockerfile))
2400}
2401
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// This is a POSIX path naming an in-container location, not a host path.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2405
/// Escapes regex special characters in a string by prefixing each one with a
/// backslash; all other characters pass through unchanged.
fn escape_regex_chars(input: &str) -> String {
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut acc, c| {
            if SPECIAL.contains(c) {
                acc.push('\\');
            }
            acc.push(c);
            acc
        })
}
2417
/// Sanitize a string for use as a Docker Compose project name, matching
/// `@devcontainers/cli`'s `toProjectName` (modern Compose branch): lowercase
/// the input and keep only characters in `[-_a-z0-9]`.
fn sanitize_compose_project_name(input: &str) -> String {
    let mut sanitized = String::with_capacity(input.len());
    for lowered in input.chars().flat_map(char::to_lowercase) {
        if matches!(lowered, 'a'..='z' | '0'..='9' | '-' | '_') {
            sanitized.push(lowered);
        }
    }
    sanitized
}
2428
2429/// Derive the Docker Compose project name, mirroring `getProjectName` in
2430/// `@devcontainers/cli`'s `src/spec-node/dockerCompose.ts`. Precedence:
2431///
2432/// 1. `COMPOSE_PROJECT_NAME` from the local environment.
2433/// 2. `COMPOSE_PROJECT_NAME` from the workspace `.env` file.
2434/// 3. The top-level `name:` field of the merged compose config, but only
2435///    when at least one compose fragment explicitly declared `name:`.
2436///    Compose injects a default `name: devcontainer` into its merged
2437///    output whenever no fragment declared one — that default must NOT be
2438///    treated as a user-provided name, so rule 4 applies instead.
2439/// 4. Basename of the first compose file's directory, appending
2440///    `_devcontainer` only when that directory is
2441///    `<workspace_root>/.devcontainer`.
2442///
2443/// The caller is responsible for computing `compose_name_explicitly_declared`
2444/// by scanning the original compose fragments for a top-level `name:` key
2445/// (the reference CLI does the same). This keeps the helper a pure function
2446/// of its inputs.
2447///
2448/// All branches pass through `sanitize_compose_project_name` — the CLI's
2449/// final normalization step.
fn derive_project_name(
    local_environment: &HashMap<String, String>,
    workspace_dotenv_contents: Option<&str>,
    compose_config_name: Option<&str>,
    compose_name_explicitly_declared: bool,
    first_compose_file: Option<&Path>,
    workspace_root: &Path,
    workspace_fallback: &str,
) -> String {
    // Rule 1: COMPOSE_PROJECT_NAME from the local environment.
    if let Some(env_name) = local_environment.get("COMPOSE_PROJECT_NAME")
        && !env_name.is_empty()
    {
        return sanitize_compose_project_name(env_name);
    }
    // Rule 2: COMPOSE_PROJECT_NAME from the workspace `.env` file.
    if let Some(contents) = workspace_dotenv_contents
        && let Some(dotenv_name) = parse_dotenv_compose_project_name(contents)
        && !dotenv_name.is_empty()
    {
        return sanitize_compose_project_name(&dotenv_name);
    }
    // Rule 3: the merged config's top-level `name:`, but only when some
    // fragment actually declared one (Compose injects a default otherwise).
    if let Some(name) = compose_config_name
        && !name.is_empty()
        && compose_name_explicitly_declared
    {
        return sanitize_compose_project_name(name);
    }
    // Rule 4: derive a name from the first compose file's directory.
    let compose_dir = first_compose_file.and_then(Path::parent);
    let canonical_devcontainer_dir = normalize_path(&workspace_root.join(".devcontainer"));
    let raw = match compose_dir {
        Some(dir) if dir == canonical_devcontainer_dir => {
            // Matches the CLI's `configDir/.devcontainer` branch: use the
            // *workspace root's* basename with the `_devcontainer` suffix,
            // NOT the `.devcontainer` dir's basename.
            format!("{workspace_fallback}_devcontainer")
        }
        Some(dir) => dir
            .file_name()
            .map(|f| f.to_string_lossy().into_owned())
            .unwrap_or_else(|| workspace_fallback.to_string()),
        None => format!("{workspace_fallback}_devcontainer"),
    };
    // Every branch funnels through the CLI's final normalization step.
    sanitize_compose_project_name(&raw)
}
2493
2494/// Classify an anyhow error from `Fs::load` as "file does not exist" vs a
2495/// real I/O failure. Used on the `.env` read in `project_name()`, where the
2496/// CLI's `getProjectName` catches only `ENOENT`/`EISDIR` and rethrows
2497/// everything else; any other error must propagate so callers can surface
2498/// the problem instead of silently falling back to a non-canonical project
2499/// name. (The fragment-rescan loop uses a different, broader swallow —
2500/// the CLI wraps its fragment read+parse in one try/catch that ignores
2501/// every failure.)
2502fn is_missing_file_error(err: &anyhow::Error) -> bool {
2503    err.downcast_ref::<std::io::Error>().is_some_and(|e| {
2504        matches!(
2505            e.kind(),
2506            std::io::ErrorKind::NotFound | std::io::ErrorKind::IsADirectory
2507        )
2508    })
2509}
2510
/// Extract `COMPOSE_PROJECT_NAME` from a `.env` file's contents. Matches
/// the subset of dotenv syntax that `@devcontainers/cli`'s regex parser
/// recognizes: a bare `COMPOSE_PROJECT_NAME=value` line (no `export` prefix,
/// no quoting, no line continuation). Comment lines are skipped and the
/// first matching line wins.
fn parse_dotenv_compose_project_name(contents: &str) -> Option<String> {
    contents
        .lines()
        .map(str::trim_start)
        .filter(|line| !line.starts_with('#'))
        .find_map(|line| line.strip_prefix("COMPOSE_PROJECT_NAME="))
        .map(|value| value.trim().to_string())
}
2527
2528/// Detect whether a compose-file fragment declares a top-level `name:` key.
2529/// Matches the reference CLI's approach: parse the fragment as YAML and check
2530/// for a `name` key on the root mapping. This handles all valid styles —
2531/// block mappings, quoted keys (`"name":`), flow-style root mappings, anchors,
2532/// etc. On parse failure we fall through (return `false`), matching the CLI's
2533/// own behavior when fragment parsing errors.
2534fn compose_fragment_declares_name(contents: &str) -> bool {
2535    let Ok(docs) = yaml_rust2::YamlLoader::load_from_str(contents) else {
2536        return false;
2537    };
2538    let Some(yaml_rust2::Yaml::Hash(h)) = docs.into_iter().next() else {
2539        return false;
2540    };
2541    h.contains_key(&yaml_rust2::Yaml::String("name".to_string()))
2542}
2543
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // First drop any digest (`@sha256:...`) or version tag (`:1`) suffix;
    // a colon only counts as a tag separator when it follows the last slash.
    let without_version = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short id is the final path segment.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2567
2568/// Generates a shell command that looks up a user's passwd entry.
2569///
2570/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2571/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2572fn get_ent_passwd_shell_command(user: &str) -> String {
2573    let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2574    let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2575    format!(
2576        " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2577        shell = escaped_for_shell,
2578        re = escaped_for_regex,
2579    )
2580}
2581
2582/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2583///
2584/// Features listed in the override come first (in the specified order), followed
2585/// by any remaining features sorted lexicographically by their full reference ID.
2586fn resolve_feature_order<'a>(
2587    features: &'a HashMap<String, FeatureOptions>,
2588    override_order: &Option<Vec<String>>,
2589) -> Vec<(&'a String, &'a FeatureOptions)> {
2590    if let Some(order) = override_order {
2591        let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2592        for ordered_id in order {
2593            if let Some((key, options)) = features.get_key_value(ordered_id) {
2594                ordered.push((key, options));
2595            }
2596        }
2597        let mut remaining: Vec<_> = features
2598            .iter()
2599            .filter(|(id, _)| !order.iter().any(|o| o == *id))
2600            .collect();
2601        remaining.sort_by_key(|(id, _)| id.as_str());
2602        ordered.extend(remaining);
2603        ordered
2604    } else {
2605        let mut entries: Vec<_> = features.iter().collect();
2606        entries.sort_by_key(|(id, _)| id.as_str());
2607        entries
2608    }
2609}
2610
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `feature_ref` is the full reference, `feature_id` the short id, and
/// `env_variables` the option lines echoed in the script's banner.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Shell-quote each value interpolated into the script; a quoting failure
    // is surfaced as a config parse error.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line by four spaces for the banner output.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!("    {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // The generated script sources the builtin and per-feature env files with
    // `set -a` so their variables are exported to install.sh.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : {escaped_name}'
echo 'Id            : {escaped_id}'
echo 'Options       :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2669
/// Ensures the Dockerfile stage selected by `build_target` (or the final
/// stage when `None`) can be referenced as `alias`: a named stage gets a new
/// `FROM <name> AS <alias>` stage appended, while an unnamed stage has its
/// `FROM` line rewritten in place with `AS <alias>`. Returns the content
/// unchanged when no matching `FROM` line exists.
fn dockerfile_inject_alias(
    dockerfile_content: &str,
    alias: &str,
    build_target: Option<String>,
) -> String {
    // Returns the stage name of a `FROM ... AS <name>` line, if any.
    fn stage_alias(line: &str) -> Option<&str> {
        let tokens: Vec<&str> = line.split_whitespace().collect();
        if tokens.len() >= 3 && tokens[tokens.len() - 2].eq_ignore_ascii_case("as") {
            tokens.last().copied()
        } else {
            None
        }
    }

    let from_lines: Vec<(usize, &str)> = dockerfile_content
        .lines()
        .enumerate()
        .filter(|(_, line)| line.starts_with("FROM"))
        .collect();

    let target_entry = match &build_target {
        Some(target) => from_lines
            .iter()
            .rfind(|(_, line)| stage_alias(line).is_some_and(|a| a.eq_ignore_ascii_case(target))),
        None => from_lines.last(),
    };

    let Some(&(line_idx, from_line)) = target_entry else {
        return dockerfile_content.to_string();
    };

    if let Some(existing_alias) = stage_alias(from_line) {
        // Named stage: append a new stage that extends it under the alias.
        format!("{dockerfile_content}\nFROM {existing_alias} AS {alias}")
    } else {
        // Unnamed stage: rewrite that FROM line with `AS <alias>` in place,
        // preserving a trailing newline if the original had one.
        let mut rebuilt = dockerfile_content
            .lines()
            .enumerate()
            .map(|(i, line)| {
                if i == line_idx {
                    format!("{line} AS {alias}")
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n");
        if dockerfile_content.ends_with('\n') {
            rebuilt.push('\n');
        }
        rebuilt
    }
}
2729
/// Extracts the image reference from the relevant `FROM` line of a Dockerfile.
///
/// With a `target`, the last `FROM ... AS <target>` stage wins (stage name
/// compared case-insensitively); otherwise the last `FROM` line is used.
/// Returns `None` when no matching `FROM` line (or image token) exists.
///
/// Tokenizes with `split_whitespace` so tabs or repeated spaces between
/// tokens don't produce empty tokens — a plain `split(' ')` returned
/// `Some("")` for `FROM  image` and failed to match tab-separated stages.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match &target {
            Some(target) => {
                let parts = from_line.split_whitespace().collect::<Vec<&str>>();
                parts.len() >= 3
                    && parts.get(parts.len() - 2).unwrap_or(&"").to_lowercase() == "as"
                    && parts.last().unwrap_or(&"").to_lowercase() == target.to_lowercase()
            }
            None => true,
        })
        .and_then(|from_line| {
            // The image reference is the token right after `FROM`.
            from_line.split_whitespace().nth(1).map(|s| s.to_string())
        })
}
2755
2756fn get_remote_user_from_config(
2757    docker_config: &DockerInspect,
2758    devcontainer: &DevContainerManifest,
2759) -> Result<String, DevContainerError> {
2760    if let DevContainer {
2761        remote_user: Some(user),
2762        ..
2763    } = &devcontainer.dev_container()
2764    {
2765        return Ok(user.clone());
2766    }
2767    if let Some(metadata) = &docker_config.config.labels.metadata {
2768        for metadatum in metadata {
2769            if let Some(remote_user) = metadatum.get("remoteUser") {
2770                if let Some(remote_user_str) = remote_user.as_str() {
2771                    return Ok(remote_user_str.to_string());
2772                }
2773            }
2774        }
2775    }
2776    if let Some(image_user) = &docker_config.config.image_user {
2777        if !image_user.is_empty() {
2778            return Ok(image_user.to_string());
2779        }
2780    }
2781    Ok("root".to_string())
2782}
2783
2784// This should come from spec - see the docs
2785fn get_container_user_from_config(
2786    docker_config: &DockerInspect,
2787    devcontainer: &DevContainerManifest,
2788) -> Result<String, DevContainerError> {
2789    if let Some(user) = &devcontainer.dev_container().container_user {
2790        return Ok(user.to_string());
2791    }
2792    if let Some(metadata) = &docker_config.config.labels.metadata {
2793        for metadatum in metadata {
2794            if let Some(container_user) = metadatum.get("containerUser") {
2795                if let Some(container_user_str) = container_user.as_str() {
2796                    return Ok(container_user_str.to_string());
2797                }
2798            }
2799        }
2800    }
2801    if let Some(image_user) = &docker_config.config.image_user {
2802        return Ok(image_user.to_string());
2803    }
2804
2805    Ok("root".to_string())
2806}
2807
2808#[cfg(test)]
2809mod test {
2810    use std::{
2811        collections::HashMap,
2812        ffi::OsStr,
2813        path::{Path, PathBuf},
2814        process::{ExitStatus, Output},
2815        sync::{Arc, Mutex},
2816    };
2817
2818    use async_trait::async_trait;
2819    use fs::{FakeFs, Fs};
2820    use gpui::{AppContext, TestAppContext};
2821    use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2822    use project::{
2823        ProjectEnvironment,
2824        worktree_store::{WorktreeIdCounter, WorktreeStore},
2825    };
2826    use serde_json_lenient::Value;
2827    use util::{command::Command, paths::SanitizedPath};
2828
2829    #[cfg(not(target_os = "windows"))]
2830    use crate::docker::DockerComposeServicePort;
2831    use crate::{
2832        DevContainerConfig, DevContainerContext,
2833        command_json::CommandRunner,
2834        devcontainer_api::DevContainerError,
2835        devcontainer_json::MountDefinition,
2836        devcontainer_manifest::{
2837            ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2838            DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2839            image_from_dockerfile, resolve_compose_dockerfile,
2840        },
2841        docker::{
2842            DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2843            DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2844            DockerPs,
2845        },
2846        oci::TokenResponse,
2847    };
    // Canonical fake project path used throughout these tests.
    #[cfg(not(target_os = "windows"))]
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
    // NOTE(review): inside a raw string, `\\` stays two literal backslashes,
    // so this value is `C:\\path\to\local\project` — confirm the doubled
    // backslash after the drive letter is intentional (path sanitization may
    // normalize it).
    #[cfg(target_os = "windows")]
    const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2852
2853    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2854        let buffer = futures::io::Cursor::new(Vec::new());
2855        let mut builder = async_tar::Builder::new(buffer);
2856        for (file_name, content) in content {
2857            if content.is_empty() {
2858                let mut header = async_tar::Header::new_gnu();
2859                header.set_size(0);
2860                header.set_mode(0o755);
2861                header.set_entry_type(async_tar::EntryType::Directory);
2862                header.set_cksum();
2863                builder
2864                    .append_data(&mut header, file_name, &[] as &[u8])
2865                    .await
2866                    .unwrap();
2867            } else {
2868                let data = content.as_bytes();
2869                let mut header = async_tar::Header::new_gnu();
2870                header.set_size(data.len() as u64);
2871                header.set_mode(0o755);
2872                header.set_entry_type(async_tar::EntryType::Regular);
2873                header.set_cksum();
2874                builder
2875                    .append_data(&mut header, file_name, data)
2876                    .await
2877                    .unwrap();
2878            }
2879        }
2880        let buffer = builder.into_inner().await.unwrap();
2881        buffer.into_inner()
2882    }
2883
2884    fn test_project_filename() -> String {
2885        PathBuf::from(TEST_PROJECT_PATH)
2886            .file_name()
2887            .expect("is valid")
2888            .display()
2889            .to_string()
2890    }
2891
2892    async fn init_devcontainer_config(
2893        fs: &Arc<FakeFs>,
2894        devcontainer_contents: &str,
2895    ) -> DevContainerConfig {
2896        fs.insert_tree(
2897            format!("{TEST_PROJECT_PATH}/.devcontainer"),
2898            serde_json::json!({"devcontainer.json": devcontainer_contents}),
2899        )
2900        .await;
2901
2902        DevContainerConfig::default_config()
2903    }
2904
    /// Handles to the fakes backing a manifest under test, so individual
    /// tests can seed state and inspect recorded interactions.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        /// Retained but not read directly by tests (leading underscore).
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2911
2912    async fn init_default_devcontainer_manifest(
2913        cx: &mut TestAppContext,
2914        devcontainer_contents: &str,
2915    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2916        let fs = FakeFs::new(cx.executor());
2917        let http_client = fake_http_client();
2918        let command_runner = Arc::new(TestCommandRunner::new());
2919        let docker = Arc::new(FakeDocker::new());
2920        let environment = HashMap::new();
2921
2922        init_devcontainer_manifest(
2923            cx,
2924            fs,
2925            http_client,
2926            docker,
2927            command_runner,
2928            environment,
2929            devcontainer_contents,
2930        )
2931        .await
2932    }
2933
2934    async fn init_devcontainer_manifest(
2935        cx: &mut TestAppContext,
2936        fs: Arc<FakeFs>,
2937        http_client: Arc<dyn HttpClient>,
2938        docker_client: Arc<FakeDocker>,
2939        command_runner: Arc<TestCommandRunner>,
2940        environment: HashMap<String, String>,
2941        devcontainer_contents: &str,
2942    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2943        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
2944        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
2945        let worktree_store =
2946            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
2947        let project_environment =
2948            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));
2949
2950        let context = DevContainerContext {
2951            project_directory: SanitizedPath::cast_arc(project_path),
2952            use_podman: false,
2953            fs: fs.clone(),
2954            http_client: http_client.clone(),
2955            environment: project_environment.downgrade(),
2956        };
2957
2958        let test_dependencies = TestDependencies {
2959            fs: fs.clone(),
2960            _http_client: http_client.clone(),
2961            docker: docker_client.clone(),
2962            command_runner: command_runner.clone(),
2963        };
2964        let manifest = DevContainerManifest::new(
2965            &context,
2966            environment,
2967            docker_client,
2968            command_runner,
2969            local_config,
2970            &PathBuf::from(TEST_PROJECT_PATH),
2971        )
2972        .await?;
2973
2974        Ok((test_dependencies, manifest))
2975    }
2976
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        // devcontainer.json declares remoteUser ("root"), which must win over
        // the conflicting remoteUser in the image metadata below.
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Image metadata advertises a different user; it should be ignored.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
3016
3017    #[gpui::test]
3018    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
3019        let (_, devcontainer_manifest) =
3020            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
3021        let mut metadata = HashMap::new();
3022        metadata.insert(
3023            "remoteUser".to_string(),
3024            serde_json_lenient::Value::String("vsCode".to_string()),
3025        );
3026        let given_docker_config = DockerInspect {
3027            id: "docker_id".to_string(),
3028            config: DockerInspectConfig {
3029                labels: DockerConfigLabels {
3030                    metadata: Some(vec![metadata]),
3031                },
3032                image_user: None,
3033                env: Vec::new(),
3034            },
3035            mounts: None,
3036            state: None,
3037        };
3038
3039        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);
3040
3041        assert!(remote_user.is_ok());
3042        let remote_user = remote_user.expect("ok");
3043        assert_eq!(&remote_user, "vsCode")
3044    }
3045
3046    #[test]
3047    fn should_extract_feature_id_from_references() {
3048        assert_eq!(
3049            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
3050            "aws-cli"
3051        );
3052        assert_eq!(
3053            extract_feature_id("ghcr.io/devcontainers/features/go"),
3054            "go"
3055        );
3056        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
3057        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
3058        assert_eq!(
3059            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
3060            "rust"
3061        );
3062    }
3063
3064    #[gpui::test]
3065    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
3066        let mut metadata = HashMap::new();
3067        metadata.insert(
3068            "remoteUser".to_string(),
3069            serde_json_lenient::Value::String("vsCode".to_string()),
3070        );
3071
3072        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
3073            cx,
3074            r#"{
3075                    "name": "TODO"
3076                }"#,
3077        )
3078        .await
3079        .unwrap();
3080        let build_resources = DockerBuildResources {
3081            image: DockerInspect {
3082                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
3083                config: DockerInspectConfig {
3084                    labels: DockerConfigLabels {
3085                        metadata: None,
3086                        },
3087                    image_user: None,
3088                    env: Vec::new(),
3089                },
3090                mounts: None,
3091                state: None,
3092            },
3093            additional_mounts: vec![],
3094            privileged: false,
3095            entrypoint_script: "echo Container started\n    trap \"exit 0\" 15\n    exec \"$@\"\n    while sleep 1 & wait $!; do :; done".to_string(),
3096        };
3097        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
3098
3099        assert!(docker_run_command.is_ok());
3100        let docker_run_command = docker_run_command.expect("ok");
3101
3102        assert_eq!(docker_run_command.get_program(), "docker");
3103        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
3104            .join(".devcontainer")
3105            .join("devcontainer.json");
3106        let expected_config_file_label = expected_config_file_label.display();
3107        assert_eq!(
3108            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
3109            vec![
3110                OsStr::new("run"),
3111                OsStr::new("--sig-proxy=false"),
3112                OsStr::new("-d"),
3113                OsStr::new("--mount"),
3114                OsStr::new(&format!(
3115                    "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
3116                )),
3117                OsStr::new("-l"),
3118                OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
3119                OsStr::new("-l"),
3120                OsStr::new(&format!(
3121                    "devcontainer.config_file={expected_config_file_label}"
3122                )),
3123                OsStr::new("--entrypoint"),
3124                OsStr::new("/bin/sh"),
3125                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
3126                OsStr::new("-c"),
3127                OsStr::new(
3128                    "
3129    echo Container started
3130    trap \"exit 0\" 15
3131    exec \"$@\"
3132    while sleep 1 & wait $!; do :; done
3133                        "
3134                    .trim()
3135                ),
3136                OsStr::new("-"),
3137            ]
3138        )
3139    }
3140
3141    #[gpui::test]
3142    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
3143        // State where service not defined in dev container
3144        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
3145        let given_docker_compose_config = DockerComposeResources {
3146            config: DockerComposeConfig {
3147                name: Some("devcontainers".to_string()),
3148                services: HashMap::new(),
3149                ..Default::default()
3150            },
3151            ..Default::default()
3152        };
3153
3154        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
3155
3156        assert!(bad_result.is_err());
3157
3158        // State where service defined in devcontainer, not found in DockerCompose config
3159        let (_, given_dev_container) =
3160            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
3161                .await
3162                .unwrap();
3163        let given_docker_compose_config = DockerComposeResources {
3164            config: DockerComposeConfig {
3165                name: Some("devcontainers".to_string()),
3166                services: HashMap::new(),
3167                ..Default::default()
3168            },
3169            ..Default::default()
3170        };
3171
3172        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);
3173
3174        assert!(bad_result.is_err());
3175        // State where service defined in devcontainer and in DockerCompose config
3176
3177        let (_, given_dev_container) =
3178            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
3179                .await
3180                .unwrap();
3181        let given_docker_compose_config = DockerComposeResources {
3182            config: DockerComposeConfig {
3183                name: Some("devcontainers".to_string()),
3184                services: HashMap::from([(
3185                    "found_service".to_string(),
3186                    DockerComposeService {
3187                        ..Default::default()
3188                    },
3189                )]),
3190                ..Default::default()
3191            },
3192            ..Default::default()
3193        };
3194
3195        let (service_name, _) =
3196            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();
3197
3198        assert_eq!(service_name, "found_service".to_string());
3199    }
3200
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        // Exercises `parse_nonremote_vars` against a config that uses every
        // non-remote substitution variable, with no explicit workspaceMount:
        // the container workspace therefore resolves to
        // /workspaces/<project basename>.
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}",
        "LOCAL_ENV_VAR_3": "before-${localEnv:missing_local_env}-after",
        "LOCAL_ENV_VAR_4": "${localEnv:with_defaults:default}"

    }
}
                    "#;
        // Seed the fake local environment so the ${localEnv:...} lookups below
        // have values to resolve against.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must advance the config into the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
        // A missing local env var with no default substitutes to the empty string.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_3")),
            Some(&"before--after".to_string())
        );
        // ${localEnv:NAME:default} falls back to the given default when unset.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_4")),
            Some(&"default".to_string())
        );
    }
3330
3331    #[test]
3332    fn test_replace_environment_variables() {
3333        let replaced = DevContainerManifest::replace_environment_variables(
3334            "before ${containerEnv:FOUND} middle ${containerEnv:MISSING:default-value} after${containerEnv:MISSING2}",
3335            "containerEnv",
3336            &HashMap::from([("FOUND".to_string(), "value".to_string())]),
3337        );
3338
3339        assert_eq!(replaced, "before value middle default-value after");
3340    }
3341
3342    #[test]
3343    fn test_replace_environment_variables_supports_defaults_with_colons() {
3344        let replaced = DevContainerManifest::replace_environment_variables(
3345            "before ${containerEnv:MISSING:one:two} after",
3346            "containerEnv",
3347            &HashMap::new(),
3348        );
3349
3350        assert_eq!(replaced, "before one:two after");
3351    }
3352
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        // Same substitution pass as the default-mount test, but with explicit
        // workspaceMount/workspaceFolder entries: container-side variables
        // must resolve against "/workspace/customfolder" rather than the
        // default /workspaces/<project> location.
        let given_devcontainer_contents = r#"
                // These are some external comments. serde_lenient should handle them
                {
                    // These are some internal comments
                    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
                    "name": "myDevContainer-${devcontainerId}",
                    "remoteUser": "root",
                    "remoteEnv": {
                        "DEVCONTAINER_ID": "${devcontainerId}",
                        "MYVAR2": "myvarothervalue",
                        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

                    },
                    "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
                    "workspaceFolder": "/workspace/customfolder"
                }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Substitution must advance the config into the VariableParsed state.
        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — basename of the explicit
        // "workspaceFolder", not of the project directory.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename} — still the local project dir name.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );
    }
3440
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        // End-to-end build test: a Dockerfile-based devcontainer with two OCI
        // features, mounts, ports, lifecycle commands, and customizations.
        // Verifies against golden files: the generated Dockerfile.extended,
        // the updateUID.Dockerfile, the per-feature install wrapper script,
        // the exact `docker run` argv, and the env passed to `docker exec`.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
            /*---------------------------------------------------------------------------------------------
             *  Copyright (c) Microsoft Corporation. All rights reserved.
             *  Licensed under the MIT License. See License.txt in the project root for license information.
             *--------------------------------------------------------------------------------------------*/
            {
              "name": "cli-${devcontainerId}",
              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                  "VARIANT": "18-bookworm",
                  "FOO": "bar",
                },
              },
              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
              "workspaceFolder": "/workspace2",
              "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
              ],

              "runArgs": [
                "--cap-add=SYS_PTRACE",
                "--sig-proxy=true",
              ],

              "forwardPorts": [
                8082,
                8083,
              ],
              "appPort": [
                8084,
                "8085:8086",
              ],

              "containerEnv": {
                "VARIABLE_VALUE": "value",
              },

              "initializeCommand": "touch IAM.md",

              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

              "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
              },

              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

              "remoteUser": "node",

              "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
              },

              "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                  "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
              },

              "customizations": {
                "vscode": {
                  "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                  ],
                },
                "zed": {
                  "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                  "repositories": {
                    "devcontainers/features": {
                      "permissions": {
                        "contents": "write",
                        "workflows": "write",
                      },
                    },
                  },
                },
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by "build.dockerfile" into the fake fs.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Run the full build-and-start flow against the fake docker client
        // and fake command runner.
        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization block contributes extension ids.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The feature build emits an extended Dockerfile: the user Dockerfile
        // plus the feature-normalize and feature-install stages appended.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // A second generated Dockerfile rewrites the remote user's UID/GID in
        // /etc/passwd and /etc/group, then re-applies the image user and the
        // containerEnv / feature env vars.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Each feature gets a generated install wrapper; check the one written
        // for the "go" feature (its path contains "/go_").
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // The `docker run` argv must carry the runArgs, every mount (workspace
        // bind, explicit volume, and the docker-in-docker volume), the
        // devcontainer labels, forwardPorts/appPort mappings, and the
        // keep-alive entrypoint (including the docker-init hook from the
        // docker-in-docker feature).
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every recorded `docker exec` must receive the remoteEnv, with
        // ${containerEnv:PATH} expanded against the container's PATH
        // (presumably "/initial/path" comes from the fake docker client —
        // verify against FakeDocker if this changes).
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3815
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Compose-based devcontainer config: two OCI features plus
        // `forwardPorts` in both numeric and "service:port" string forms.
        let given_devcontainer_contents = r#"
            // For format details, see https://aka.ms/devcontainer.json. For config options, see the
            // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
            {
              "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
              },
              "name": "Rust and PostgreSQL",
              "dockerComposeFile": "docker-compose.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

              // Features to add to the dev container. More info: https://containers.dev/features.
              // "features": {},

              // Use 'forwardPorts' to make a list of ports inside the container available locally.
              "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
              ],

              // Use 'postCreateCommand' to run commands after the container is created.
              // "postCreateCommand": "rustc --version",

              // Configure tool-specific properties.
              // "customizations": {},

              // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
              // "remoteUser": "root"
            }
            "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Seed the fake fs with a two-service compose file; `app` builds
        // from a local Dockerfile and shares `db`'s network namespace.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
    postgres-data:

services:
    app:
        build:
            context: .
            dockerfile: Dockerfile
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        volumes:
            - ../..:/workspaces:cached

        # Overrides default command so things don't shut down after the process ends.
        command: sleep infinity

        # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
        network_mode: service:db

        # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)

    db:
        image: postgres:14.1
        restart: unless-stopped
        volumes:
            - postgres-data:/var/lib/postgresql/data
        env_file:
            # Ensure that the variables in .env match the same variables in devcontainer.json
            - .env

        # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
        # (Adding the "ports" property to this file will not forward from a Codespace.)
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile that `app`'s build section points at; its contents
        // must reappear verbatim inside the generated Dockerfile.extended.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Exercise the full compose build-and-run path.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The `docker compose ... up` invocation must use the CLI-compatible
        // project name derived from the workspace folder basename.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");
        let compose_up = docker_commands
            .iter()
            .find(|c| {
                c.args.first().map(String::as_str) == Some("compose")
                    && c.args.iter().any(|a| a == "up")
            })
            .expect("docker compose up command recorded");
        let project_name_idx = compose_up
            .args
            .iter()
            .position(|a| a == "--project-name")
            .expect("compose command has --project-name flag");
        assert_eq!(
            compose_up.args[project_name_idx + 1],
            "project_devcontainer",
            "compose project name should match @devcontainers/cli derivation \
             (${{folderBasename}}_devcontainer), ignoring devcontainer.json `name`"
        );

        // Generated Dockerfile.extended: the user Dockerfile followed by the
        // feature-install stages (aws-cli then docker-in-docker, in
        // declaration order).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Generated updateUID.Dockerfile: remaps the remote user's UID/GID
        // to the host's (updateRemoteUserUID defaults to true on this
        // platform; the cfg above excludes Windows).
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The compose build override must carry through the original build
        // context ("." relative to the compose file), not replace it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // Runtime override: `app` gets the keep-alive entrypoint, docker-in-
        // docker capabilities/labels/volume; because `app` uses
        // `network_mode: service:db`, all forwarded ports land on `db`.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
4158
4159    #[test]
4160    fn derive_project_name_env_wins_over_everything() {
4161        // CLI precedence rule 1: `COMPOSE_PROJECT_NAME` env var short-circuits
4162        // every later source (.env, compose name:, basename fallback).
4163        use crate::devcontainer_manifest::derive_project_name;
4164
4165        let env = HashMap::from([("COMPOSE_PROJECT_NAME".to_string(), "from_env".to_string())]);
4166        let got = derive_project_name(
4167            &env,
4168            Some("COMPOSE_PROJECT_NAME=from_dotenv\n"),
4169            Some("from_compose_name"),
4170            true,
4171            Some(Path::new(
4172                "/path/to/local/project/.devcontainer/docker-compose.yml",
4173            )),
4174            Path::new("/path/to/local/project"),
4175            "project",
4176        );
4177        assert_eq!(got, "from_env");
4178    }
4179
4180    #[test]
4181    fn derive_project_name_dotenv_wins_over_compose_and_fallback() {
4182        // CLI precedence rule 2: when no env var is set, the workspace .env's
4183        // `COMPOSE_PROJECT_NAME=` line wins over the compose config's `name:`
4184        // field and the basename fallback.
4185        use crate::devcontainer_manifest::derive_project_name;
4186
4187        let got = derive_project_name(
4188            &HashMap::new(),
4189            Some("# comment\nCOMPOSE_PROJECT_NAME=from_dotenv\n"),
4190            Some("from_compose_name"),
4191            true,
4192            Some(Path::new(
4193                "/path/to/local/project/.devcontainer/docker-compose.yml",
4194            )),
4195            Path::new("/path/to/local/project"),
4196            "project",
4197        );
4198        assert_eq!(got, "from_dotenv");
4199    }
4200
4201    #[test]
4202    fn derive_project_name_compose_name_wins_over_fallback() {
4203        // CLI precedence rule 3: when neither env nor .env provide a name,
4204        // the merged compose config's top-level `name:` field takes precedence
4205        // over the basename fallback. Also covers sanitization (spaces
4206        // stripped, uppercase lowercased).
4207        use crate::devcontainer_manifest::derive_project_name;
4208
4209        let got = derive_project_name(
4210            &HashMap::new(),
4211            None,
4212            Some("My Compose Project"),
4213            true,
4214            Some(Path::new(
4215                "/path/to/local/project/.devcontainer/docker-compose.yml",
4216            )),
4217            Path::new("/path/to/local/project"),
4218            "project",
4219        );
4220        assert_eq!(got, "mycomposeproject");
4221    }
4222
4223    #[test]
4224    fn derive_project_name_skips_compose_name_when_not_explicitly_declared() {
4225        // CLI precedence rule 3 edge case: `docker compose config` injects a
4226        // default `name: devcontainer` into the merged output whenever no
4227        // compose fragment declared one. `@devcontainers/cli` ignores that
4228        // default by tracking per-fragment whether `name:` was declared and
4229        // skipping rule 3 if none was. The caller conveys that signal via
4230        // `compose_name_explicitly_declared`; when it's `false`, even a
4231        // non-empty `compose_config_name` must be skipped so rule 4 applies.
4232        use crate::devcontainer_manifest::derive_project_name;
4233
4234        let got = derive_project_name(
4235            &HashMap::new(),
4236            None,
4237            Some("devcontainer"),
4238            false,
4239            Some(Path::new(
4240                "/path/to/myworkspace/.devcontainer/docker-compose.yml",
4241            )),
4242            Path::new("/path/to/myworkspace"),
4243            "myworkspace",
4244        );
4245        assert_eq!(got, "myworkspace_devcontainer");
4246    }
4247
4248    #[test]
4249    fn derive_project_name_omits_suffix_when_compose_file_outside_devcontainer_dir() {
4250        // CLI precedence rule 4: when falling back to the first compose file's
4251        // directory basename, the `_devcontainer` suffix is only appended when
4252        // that directory IS `<config>/.devcontainer`. A compose file at the
4253        // workspace root (as `"dockerComposeFile": "../docker-compose.yml"`
4254        // produces) must derive to the plain dir basename, not
4255        // `project_devcontainer` — otherwise Zed diverges from the CLI.
4256        use crate::devcontainer_manifest::derive_project_name;
4257
4258        let got = derive_project_name(
4259            &HashMap::new(),
4260            None,
4261            None,
4262            false,
4263            Some(Path::new("/path/to/local/project/docker-compose.yml")),
4264            Path::new("/path/to/local/project"),
4265            "project",
4266        );
4267        assert_eq!(got, "project");
4268    }
4269
4270    #[test]
4271    fn derive_project_name_handles_resolved_paths_from_docker_compose_manifest() {
4272        // `docker_compose_manifest()` normalizes compose file paths upfront
4273        // (resolving `..` components from raw `dockerComposeFile` entries like
4274        // `"subdir/../docker-compose.yml"`) before populating
4275        // `DockerComposeResources.files`. This test pins the resulting
4276        // rule-4/rule-5 behavior on those normalized paths: a file
4277        // semantically under `<workspace>/.devcontainer` takes rule 4, and
4278        // one that resolves outside it takes rule 5.
4279        use crate::devcontainer_manifest::derive_project_name;
4280
4281        // Normalized equivalent of `.devcontainer/subdir/../docker-compose.yml`:
4282        // rule 4 applies → `${ws}_devcontainer`.
4283        let got_under = derive_project_name(
4284            &HashMap::new(),
4285            None,
4286            None,
4287            false,
4288            Some(Path::new(
4289                "/path/to/local/project/.devcontainer/docker-compose.yml",
4290            )),
4291            Path::new("/path/to/local/project"),
4292            "project",
4293        );
4294        assert_eq!(got_under, "project_devcontainer");
4295
4296        // Normalized equivalent of `.devcontainer/../docker-compose.yml`:
4297        // the file sits at the workspace root, so rule 5 applies — plain
4298        // basename of the parent dir, no suffix.
4299        let got_escaped = derive_project_name(
4300            &HashMap::new(),
4301            None,
4302            None,
4303            false,
4304            Some(Path::new("/path/to/local/project/docker-compose.yml")),
4305            Path::new("/path/to/local/project"),
4306            "project",
4307        );
4308        assert_eq!(got_escaped, "project");
4309    }
4310
4311    #[test]
4312    fn compose_fragment_declares_name_detects_top_level_name_key() {
4313        // Block-style top-level key — declared.
4314        use crate::devcontainer_manifest::compose_fragment_declares_name;
4315
4316        assert!(compose_fragment_declares_name(
4317            "name: my-project\nservices:\n  app:\n    image: foo\n"
4318        ));
4319        // Indented `name:` belongs to a nested mapping (here a service) and
4320        // must NOT count as a top-level declaration.
4321        assert!(!compose_fragment_declares_name(
4322            "services:\n  app:\n    name: inner\n    image: foo\n"
4323        ));
4324        // Comment lines are ignored.
4325        assert!(!compose_fragment_declares_name(
4326            "# name: commented-out\nservices: {}\n"
4327        ));
4328        // Empty fragment — no declaration.
4329        assert!(!compose_fragment_declares_name(""));
4330        // Quoted key — still a top-level declaration. A line scanner that
4331        // looks for bare `name:` at column 0 would miss this.
4332        assert!(compose_fragment_declares_name(
4333            "\"name\": my-project\nservices: {}\n"
4334        ));
4335        // Flow-style root mapping — also a top-level declaration. Again a
4336        // line scanner keyed on block-style layout would miss it.
4337        assert!(compose_fragment_declares_name(
4338            "{name: my-project, services: {app: {image: foo}}}\n"
4339        ));
4340        // Unparsable fragment falls through to "not declared" (matches the
4341        // CLI's behavior on parse failure).
4342        assert!(!compose_fragment_declares_name(": : :\n- - -\n"));
4343    }
4344
4345    #[test]
4346    fn is_missing_file_error_only_accepts_notfound_and_isadirectory() {
4347        // Mirrors the CLI's narrow `ENOENT`/`EISDIR` swallow in
4348        // `getProjectName`'s `.env` read. Any other `io::Error` — permission
4349        // denied, I/O failure, `ENOTDIR`, etc. — must not be classified as
4350        // "missing" so callers surface the problem instead of silently
4351        // falling back to a non-canonical project name. Non-`io::Error`
4352        // anyhow errors must also not be classified as missing.
4353        use crate::devcontainer_manifest::is_missing_file_error;
4354
4355        let notfound = anyhow::Error::new(std::io::Error::from(std::io::ErrorKind::NotFound));
4356        assert!(is_missing_file_error(&notfound));
4357
4358        // EISDIR — `.env` exists as a directory; CLI swallows, so must we.
4359        let is_a_dir = anyhow::Error::new(std::io::Error::from(std::io::ErrorKind::IsADirectory));
4360        assert!(is_missing_file_error(&is_a_dir));
4361
4362        // ENOTDIR — a path component isn't a directory; CLI does NOT
4363        // swallow this (its catch is narrow to ENOENT/EISDIR), so we must
4364        // propagate it as a real failure.
4365        let not_a_dir = anyhow::Error::new(std::io::Error::from(std::io::ErrorKind::NotADirectory));
4366        assert!(!is_missing_file_error(&not_a_dir));
4367
4368        let permission_denied =
4369            anyhow::Error::new(std::io::Error::from(std::io::ErrorKind::PermissionDenied));
4370        assert!(!is_missing_file_error(&permission_denied));
4371
4372        let other_io = anyhow::Error::new(std::io::Error::from(std::io::ErrorKind::Other));
4373        assert!(!is_missing_file_error(&other_io));
4374
4375        let non_io: anyhow::Error = anyhow::anyhow!("something else");
4376        assert!(!is_missing_file_error(&non_io));
4377    }
4378
4379    #[test]
4380    fn sanitize_compose_project_name_matches_cli_rules() {
4381        use crate::devcontainer_manifest::sanitize_compose_project_name;
4382
4383        // Plain lowercase alnum passes through.
4384        assert_eq!(
4385            sanitize_compose_project_name("project_devcontainer"),
4386            "project_devcontainer"
4387        );
4388        // Hyphens survive (unlike safe_id_lower which would replace them with _).
4389        assert_eq!(
4390            sanitize_compose_project_name("devcontainer-compose-test_devcontainer"),
4391            "devcontainer-compose-test_devcontainer"
4392        );
4393        // Uppercase letters are lowercased.
4394        assert_eq!(
4395            sanitize_compose_project_name("Makermint-Studio_devcontainer"),
4396            "makermint-studio_devcontainer"
4397        );
4398        // Characters outside [-_a-z0-9] are stripped.
4399        assert_eq!(
4400            sanitize_compose_project_name("Rust & PostgreSQL_devcontainer"),
4401            "rustpostgresql_devcontainer"
4402        );
4403    }
4404
4405    #[test]
4406    fn test_resolve_compose_dockerfile() {
4407        let compose = Path::new("/project/.devcontainer/docker-compose.yml");
4408
4409        // Bug case (#53473): context ".." with relative dockerfile
4410        assert_eq!(
4411            resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
4412            Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
4413        );
4414
4415        // Compose path containing ".." (as docker_compose_manifest() produces)
4416        assert_eq!(
4417            resolve_compose_dockerfile(
4418                Path::new("/project/.devcontainer/../docker-compose.yml"),
4419                Some("."),
4420                "docker/Dockerfile",
4421            ),
4422            Some(PathBuf::from("/project/docker/Dockerfile")),
4423        );
4424
4425        // Absolute dockerfile returned as-is
4426        assert_eq!(
4427            resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
4428            Some(PathBuf::from("/absolute/Dockerfile")),
4429        );
4430
4431        // Absolute context used directly
4432        assert_eq!(
4433            resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
4434            Some(PathBuf::from("/abs/context/Dockerfile")),
4435        );
4436
4437        // No context defaults to compose file's directory
4438        assert_eq!(
4439            resolve_compose_dockerfile(compose, None, "Dockerfile"),
4440            Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
4441        );
4442    }
4443
    #[gpui::test]
    async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();

        // Compose-based config pointing at a fixture compose file.
        // NOTE(review): assumes the test harness seeds
        // `docker-compose-context-parent.yml` (presumably with a parent-dir
        // build context, per the test name) — confirm in
        // `init_default_devcontainer_manifest`.
        let given_devcontainer_contents = r#"
            {
              "name": "Test",
              "dockerComposeFile": "docker-compose-context-parent.yml",
              "service": "app",
              "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
            }
            "#;
        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // The resolved Dockerfile must land inside `.devcontainer` even
        // though the compose build context points elsewhere.
        let expected = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("Dockerfile");
        assert_eq!(
            devcontainer_manifest.dockerfile_location().await,
            Some(expected)
        );
    }
4472
    /// Compose-based devcontainer with two features, explicit `forwardPorts`/
    /// `appPort`, and `updateRemoteUserUID: false`: building must emit a
    /// feature-extended Dockerfile ("Dockerfile.extended") whose contents match
    /// the expected output byte-for-byte (BuildKit `RUN --mount` feature
    /// installs, trailing `ENV DOCKER_BUILDKIT=1`).
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json under test (JSONC: line comments and trailing commas).
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          "forwardPorts": [
            8083,
            "db:5432",
            "db:1234",
          ],
          "updateRemoteUserUID": false,
          "appPort": "8084",

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by "dockerComposeFile" above into the fake fs.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
    build:
        context: .
        dockerfile: Dockerfile
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    volumes:
        - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
        - postgres-data:/var/lib/postgresql/data
    env_file:
        # Ensure that the variables in .env match the same variables in devcontainer.json
        - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Write the Dockerfile that the compose "app" service builds from.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve local (non-remote) variable substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The build should have written a feature-extended Dockerfile somewhere in the fake fs.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Exact expected contents: user Dockerfile first, then feature
        // normalize/target stages with one BuildKit `RUN --mount` install per
        // feature (aws-cli_0, docker-in-docker_1), user restoration, and the
        // feature-contributed `ENV DOCKER_BUILDKIT=1`.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4648
    /// Same compose scenario but with the docker fake reporting Podman: the
    /// generated extended Dockerfile must avoid BuildKit `RUN --mount` installs,
    /// instead adding a `dev_containers_feature_content_source` stage and plain
    /// `COPY --from` steps. The config does not set `updateRemoteUserUID`, and an
    /// `updateUID.Dockerfile` is also generated and checked byte-for-byte.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json under test (JSONC: line comments and trailing commas).
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
          "features": {
            "ghcr.io/devcontainers/features/aws-cli:1": {},
            "ghcr.io/devcontainers/features/docker-in-docker:2": {},
          },
          "name": "Rust and PostgreSQL",
          "dockerComposeFile": "docker-compose.yml",
          "service": "app",
          "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

          // Features to add to the dev container. More info: https://containers.dev/features.
          // "features": {},

          // Use 'forwardPorts' to make a list of ports inside the container available locally.
          // "forwardPorts": [5432],

          // Use 'postCreateCommand' to run commands after the container is created.
          // "postCreateCommand": "rustc --version",

          // Configure tool-specific properties.
          // "customizations": {},

          // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
          // "remoteUser": "root"
        }
        "#;
        // Unlike the default helper, build the manifest with a docker fake that
        // identifies itself as Podman so the Podman-specific build path is taken.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Write the compose file referenced by "dockerComposeFile" above into the fake fs.
        test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
            r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
        )
        .await
        .unwrap();

        // Write the Dockerfile that the compose "app" service builds from.
        test_dependencies.fs.atomic_write(
        PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
        r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve local (non-remote) variable substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // Locate the generated feature-extended Dockerfile.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Podman variant: feature content comes via a
        // `dev_container_feature_content_temp` stage and `COPY --from` steps
        // (no `RUN --mount`, no `ENV DOCKER_BUILDKIT=1` at the end).
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // With `updateRemoteUserUID` unset, a UID-update Dockerfile must also be generated.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Expected UID/GID remap script plus a trailing `ENV DOCKER_BUILDKIT=1`
        // in this Podman compose scenario.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4874
    /// Dockerfile-based devcontainer exercising most config surface: build args
    /// and target, mounts, lifecycle commands, remoteUser/remoteEnv, features,
    /// and customizations. Verifies that zed extension ids propagate to the
    /// `DevContainerUp`, the extended Dockerfile matches byte-for-byte
    /// (including the build `target` stage being labeled), the go feature's
    /// install wrapper script is rendered as expected, a `docker run` command
    /// was issued, and every recorded exec carries the resolved `remoteEnv`.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json under test (JSONC: block comments and trailing commas).
        let given_devcontainer_contents = r#"
            /*---------------------------------------------------------------------------------------------
             *  Copyright (c) Microsoft Corporation. All rights reserved.
             *  Licensed under the MIT License. See License.txt in the project root for license information.
             *--------------------------------------------------------------------------------------------*/
            {
              "name": "cli-${devcontainerId}",
              // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                  "VARIANT": "18-bookworm",
                  "FOO": "bar",
                },
                "target": "development",
              },
              "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
              "workspaceFolder": "/workspace2",
              "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
              ],

              "forwardPorts": [
                8082,
                8083,
              ],
              "appPort": "8084",
              "updateRemoteUserUID": false,

              "containerEnv": {
                "VARIABLE_VALUE": "value",
              },

              "initializeCommand": "touch IAM.md",

              "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

              "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

              "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
              },

              "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

              "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

              "remoteUser": "node",

              "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
              },

              "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                  "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
              },

              "customizations": {
                "vscode": {
                  "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                  ],
                },
                "zed": {
                  "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                  "repositories": {
                    "devcontainers/features": {
                      "permissions": {
                        "contents": "write",
                        "workflows": "write",
                      },
                    },
                  },
                },
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the multi-stage Dockerfile referenced by "build.dockerfile";
        // "build.target" selects the `development` stage below.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Resolve local (non-remote) variable substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extension ids should be surfaced.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // Locate the generated feature-extended Dockerfile in the fake fs.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Expected: user Dockerfile, a `FROM development AS ...stage_label` line
        // pinning the configured build target, BuildKit feature installs
        // (docker-in-docker_0, go_1), user restoration, and ENV lines
        // contributed by the go feature and `containerEnv`.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM development AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Find the go feature's install wrapper (path contains "go_" to
        // distinguish it from the docker-in-docker feature's wrapper).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        // Wrapper script: banner with resolved feature options, env sourcing,
        // then the feature's install.sh.
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // A `docker run` invocation must have been recorded by the command runner.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // Every exec should carry the configured remoteEnv: OTHER_ENV plus PATH
        // with "${containerEnv:PATH}" expanded against the container's PATH
        // (presumably "/initial/path" from the docker fake — confirm there).
        // NOTE(review): `all` is vacuously true if no execs were recorded;
        // consider also asserting the list is non-empty.
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
5158
    /// Plain `image`-based devcontainer (no Dockerfile, no compose): since the
    /// config does not set `updateRemoteUserUID`, building must generate an
    /// `updateUID.Dockerfile` whose UID/GID-remap script matches byte-for-byte.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Minimal JSONC config: templated name plus a plain image reference.
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "image": "test_image:latest",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Resolve local (non-remote) variable substitutions before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The build should have written a UID-update Dockerfile to the fake fs.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Expected UID/GID remap script; unlike the compose/podman variant this
        // one has no trailing `ENV DOCKER_BUILDKIT=1`.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
5233
5234    #[cfg(target_os = "windows")]
5235    #[gpui::test]
5236    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
5237        cx.executor().allow_parking();
5238        env_logger::try_init().ok();
5239        let given_devcontainer_contents = r#"
5240            {
5241              "name": "cli-${devcontainerId}",
5242              "image": "test_image:latest",
5243            }
5244            "#;
5245
5246        let (_, mut devcontainer_manifest) =
5247            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5248                .await
5249                .unwrap();
5250
5251        devcontainer_manifest.parse_nonremote_vars().unwrap();
5252
5253        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
5254
5255        assert_eq!(
5256            devcontainer_up.remote_workspace_folder,
5257            "/workspaces/project"
5258        );
5259    }
5260
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Compose-based config whose service uses a plain image (no build
        // section); the UID-update Dockerfile must still be generated.
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "dockerComposeFile": "docker-compose-plain.yml",
              "service": "app",
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by the devcontainer.json; the fake
        // docker client returns a matching parsed config for this exact path.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
    app:
        image: test_image:latest
        command: sleep infinity
        volumes:
            - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // build_and_run should have written an updateUID.Dockerfile somewhere in
        // the in-memory fs; locate it by file name and load its contents.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // The generated Dockerfile rewrites the remote user's UID/GID to match
        // the host user (freeing a colliding GID if needed) and patches
        // /etc/profile so it cannot clobber an inherited PATH.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
	eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
	if [ -z "$OLD_UID" ]; then \
		echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
	elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
		echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
	elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
		echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
	else \
		if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
			FREE_GID=65532; \
			while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
			echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
			sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
		fi; \
		echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
		sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
		if [ "$OLD_GID" != "$NEW_GID" ]; then \
			sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
		fi; \
		chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
	fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
5354
5355    #[gpui::test]
5356    async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
5357        cx.executor().allow_parking();
5358        env_logger::try_init().ok();
5359        let given_devcontainer_contents = r#"
5360            {
5361              "name": "cli-${devcontainerId}",
5362              "build": {
5363                "dockerfile": "Dockerfile",
5364                "args": {
5365                    "VERSION": "1.22",
5366                }
5367              },
5368            }
5369            "#;
5370
5371        let (test_dependencies, mut devcontainer_manifest) =
5372            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5373                .await
5374                .unwrap();
5375
5376        test_dependencies
5377            .fs
5378            .atomic_write(
5379                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
5380                r#"
5381FROM dontgrabme as build_context
5382ARG VERSION=1.21
5383ARG REPOSITORY=mybuild
5384ARG REGISTRY=docker.io/stuff
5385
5386ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
5387
5388FROM ${IMAGE} AS devcontainer
5389                    "#
5390                .trim()
5391                .to_string(),
5392            )
5393            .await
5394            .unwrap();
5395
5396        devcontainer_manifest.parse_nonremote_vars().unwrap();
5397
5398        let dockerfile_contents = devcontainer_manifest
5399            .expanded_dockerfile_content()
5400            .await
5401            .unwrap();
5402        let base_image = image_from_dockerfile(
5403            dockerfile_contents,
5404            &devcontainer_manifest
5405                .dev_container()
5406                .build
5407                .as_ref()
5408                .and_then(|b| b.target.clone()),
5409        )
5410        .unwrap();
5411
5412        assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
5413    }
5414
5415    #[gpui::test]
5416    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
5417        cx.executor().allow_parking();
5418        env_logger::try_init().ok();
5419        let given_devcontainer_contents = r#"
5420            {
5421              "name": "cli-${devcontainerId}",
5422              "build": {
5423                "dockerfile": "Dockerfile",
5424                "args": {
5425                    "VERSION": "1.22",
5426                },
5427                "target": "development"
5428              },
5429            }
5430            "#;
5431
5432        let (test_dependencies, mut devcontainer_manifest) =
5433            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5434                .await
5435                .unwrap();
5436
5437        test_dependencies
5438            .fs
5439            .atomic_write(
5440                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
5441                r#"
5442FROM dontgrabme as build_context
5443ARG VERSION=1.21
5444ARG REPOSITORY=mybuild
5445ARG REGISTRY=docker.io/stuff
5446
5447ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
5448ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest
5449
5450FROM ${DEV_IMAGE} AS development
5451FROM ${IMAGE} AS production
5452                    "#
5453                .trim()
5454                .to_string(),
5455            )
5456            .await
5457            .unwrap();
5458
5459        devcontainer_manifest.parse_nonremote_vars().unwrap();
5460
5461        let dockerfile_contents = devcontainer_manifest
5462            .expanded_dockerfile_content()
5463            .await
5464            .unwrap();
5465        let base_image = image_from_dockerfile(
5466            dockerfile_contents,
5467            &devcontainer_manifest
5468                .dev_container()
5469                .build
5470                .as_ref()
5471                .and_then(|b| b.target.clone()),
5472        )
5473        .unwrap();
5474
5475        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
5476    }
5477
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Two args come from devcontainer.json: JSON_ARG (no Dockerfile default)
        // and ELIXIR_VERSION (overriding the Dockerfile's own default).
        let given_devcontainer_contents = r#"
            {
              "name": "cli-${devcontainerId}",
              "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
              },
            }
            "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile exercising expansion edge cases: a forward reference
        // (OTP_VERSION used before declared), two assignments on one ARG line,
        // quoted/nested JSON-looking values, and an arg fed from devcontainer.json.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                    "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Expectations: the forward reference stays literal; ARG default lines
        // are left as written, but references expand using devcontainer.json
        // values first (IMAGE uses 1.21, not the file's 1.20.0-rc.4).
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
            "#
            .trim()
        )
    }
5550
5551    #[gpui::test]
5552    async fn test_expands_compose_service_args_in_dockerfile(cx: &mut TestAppContext) {
5553        cx.executor().allow_parking();
5554        env_logger::try_init().ok();
5555
5556        let given_devcontainer_contents = r#"
5557            {
5558              "dockerComposeFile": "docker-compose-with-args.yml",
5559              "service": "app",
5560            }
5561            "#;
5562
5563        let (test_dependencies, mut devcontainer_manifest) =
5564            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5565                .await
5566                .unwrap();
5567
5568        test_dependencies
5569            .fs
5570            .atomic_write(
5571                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
5572                "FROM ${BASE_IMAGE}\nUSER root\n".to_string(),
5573            )
5574            .await
5575            .unwrap();
5576
5577        devcontainer_manifest.parse_nonremote_vars().unwrap();
5578
5579        let expanded = devcontainer_manifest
5580            .expanded_dockerfile_content()
5581            .await
5582            .unwrap();
5583
5584        assert_eq!(expanded, "FROM test_image:latest\nUSER root");
5585
5586        let base_image =
5587            image_from_dockerfile(expanded, &None).expect("base image resolves from compose args");
5588        assert_eq!(base_image, "test_image:latest");
5589    }
5590
5591    #[cfg(not(target_os = "windows"))]
5592    #[gpui::test]
5593    async fn check_for_existing_container_errors_when_multiple_match(cx: &mut TestAppContext) {
5594        cx.executor().allow_parking();
5595        let (test_dependencies, devcontainer_manifest) =
5596            init_default_devcontainer_manifest(cx, r#"{"image": "image"}"#)
5597                .await
5598                .unwrap();
5599        test_dependencies
5600            .docker
5601            .set_duplicate_container_ids(vec!["abc123".to_string(), "def456".to_string()]);
5602
5603        let result = devcontainer_manifest
5604            .check_for_existing_devcontainer()
5605            .await;
5606
5607        let Err(DevContainerError::MultipleMatchingContainers(ids)) = result else {
5608            panic!("expected MultipleMatchingContainers, got {result:?}");
5609        };
5610        assert_eq!(ids, vec!["abc123".to_string(), "def456".to_string()]);
5611    }
5612
5613    #[gpui::test]
5614    async fn trim_non_alphanumeric_chars_from_image_tag(cx: &mut TestAppContext) {
5615        cx.executor().allow_parking();
5616        env_logger::try_init().ok();
5617        let given_devcontainer_contents = r#"
5618            {
5619              "name": "abcde test",
5620              "image": "test_image:latest",
5621            }
5622            "#;
5623
5624        let (_, devcontainer_manifest) =
5625            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
5626                .await
5627                .unwrap();
5628
5629        let image_tag = devcontainer_manifest.generate_features_image_tag("Dockerfile".to_string());
5630
5631        assert!(
5632            image_tag.starts_with("abcde-"),
5633            "expected prefix 'abcde-', got: {image_tag}"
5634        );
5635        assert!(
5636            image_tag.ends_with("-features"),
5637            "expected suffix '-features', got: {image_tag}"
5638        );
5639    }
5640
    // TODO(review): empty placeholder — passes without asserting anything.
    // Implement the aliasing check or mark it #[ignore] so it isn't mistaken
    // for coverage.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
5643
    // TODO(review): empty placeholder — passes without asserting anything.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
5646
    // TODO(review): empty placeholder — passes without asserting anything.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
5649
    /// One `run_docker_exec` invocation captured by `FakeDocker`, so tests can
    /// inspect what would have been executed inside the container.
    pub(crate) struct RecordedExecCommand {
        // Underscore-prefixed fields are recorded but not currently asserted on.
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment handed to the exec'd process.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
5657
    /// In-memory `DockerClient` double: serves canned inspect/compose fixtures
    /// and records exec requests instead of talking to a real daemon.
    pub(crate) struct FakeDocker {
        // Log of every `run_docker_exec` call, in order.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as podman (affects CLI name and
        // buildkit support).
        podman: bool,
        // Whether buildx is reported available (see `supports_compose_buildkit`).
        has_buildx: bool,
        /// When `Some`, `find_process_by_filters` returns
        /// `MultipleMatchingContainers` with these IDs. Used to exercise the
        /// duplicate-container error path.
        duplicate_container_ids: Mutex<Option<Vec<String>>>,
    }
5667
5668    impl FakeDocker {
5669        pub(crate) fn new() -> Self {
5670            Self {
5671                podman: false,
5672                has_buildx: true,
5673                exec_commands_recorded: Mutex::new(Vec::new()),
5674                duplicate_container_ids: Mutex::new(None),
5675            }
5676        }
5677        #[cfg(not(target_os = "windows"))]
5678        fn set_podman(&mut self, podman: bool) {
5679            self.podman = podman;
5680        }
5681        #[cfg(not(target_os = "windows"))]
5682        fn set_duplicate_container_ids(&self, ids: Vec<String>) {
5683            *self
5684                .duplicate_container_ids
5685                .lock()
5686                .expect("should be available") = Some(ids);
5687        }
5688    }
5689
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Serves canned `docker inspect` responses for the image/container ids
        /// used across these tests; any other id errors with `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // typescript-node base image: remoteUser "node", empty env.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // rust base image: remoteUser "vscode".
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Ids starting with "cli_": only fixture with a pre-set PATH in env.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id handed out by `find_process_by_filters` below;
            // the only fixture that reports mounts.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Images tagged from a "rust_a-…" project name.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Plain image used by the compose/plain-image tests.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns a parsed compose config matching the single requested config
        /// file, mirroring the YAML fixtures the tests write to disk. Requests
        /// for any other file set error with `DockerNotAvailable`.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            let project_path = PathBuf::from(TEST_PROJECT_PATH);
            // docker-compose.yml: app service built from a Dockerfile, plus a
            // postgres "db" service; app uses `network_mode: service:db`.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(
                        &project_path
                            .join(".devcontainer")
                            .join("docker-compose.yml"),
                    )
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                    target: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // docker-compose-context-parent.yml: build context is the project
            // root; the dockerfile path is relative to that context.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(
                        &project_path
                            .join(".devcontainer")
                            .join("docker-compose-context-parent.yml"),
                    )
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            build: Some(DockerComposeServiceBuild {
                                context: Some("..".to_string()),
                                dockerfile: Some(
                                    PathBuf::from(".devcontainer")
                                        .join("Dockerfile")
                                        .display()
                                        .to_string(),
                                ),
                                args: None,
                                additional_contexts: None,
                                target: None,
                            }),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                }));
            }
            // docker-compose-with-args.yml: build args supply BASE_IMAGE for
            // the arg-expansion test.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(
                        &project_path
                            .join(".devcontainer")
                            .join("docker-compose-with-args.yml"),
                    )
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            build: Some(DockerComposeServiceBuild {
                                context: Some(".".to_string()),
                                dockerfile: Some("Dockerfile".to_string()),
                                args: Some(HashMap::from([(
                                    "BASE_IMAGE".to_string(),
                                    "test_image:latest".to_string(),
                                )])),
                                additional_contexts: None,
                                target: None,
                            }),
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            // docker-compose-plain.yml: image-only service, no build section.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(
                        &project_path
                            .join(".devcontainer")
                            .join("docker-compose-plain.yml"),
                    )
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Pretends the compose build succeeded without doing any work.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec request (instead of running it) so tests can assert
        /// on container id, folder, user, env, and command.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Always fails; no test in this file exercises restarting a container.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns the single fixture container, unless duplicate ids were
        /// injected via `set_duplicate_container_ids` — then errors with
        /// `MultipleMatchingContainers`.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            if let Some(ids) = self
                .duplicate_container_ids
                .lock()
                .expect("should be available")
                .clone()
            {
                return Err(DevContainerError::MultipleMatchingContainers(ids));
            }
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        /// BuildKit via buildx is reported only for docker (not podman).
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman && self.has_buildx
        }
        /// CLI binary name matching the configured engine flavor.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
6003
    /// A program-plus-arguments pair recorded by `TestCommandRunner`.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
6009
    /// `CommandRunner` double that records every command instead of spawning a
    /// process.
    pub(crate) struct TestCommandRunner {
        // Ordered log of all commands passed to `run_command`.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
6013
6014    impl TestCommandRunner {
6015        fn new() -> Self {
6016            Self {
6017                commands_recorded: Mutex::new(Vec::new()),
6018            }
6019        }
6020
6021        fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
6022            let record = self.commands_recorded.lock().expect("poisoned");
6023            record
6024                .iter()
6025                .filter(|r| r.program == program)
6026                .map(|r| r.clone())
6027                .collect()
6028        }
6029    }
6030
    #[async_trait]
    impl CommandRunner for TestCommandRunner {
        /// Records the command's program and arguments, then returns a default
        /// (empty-output) `Output` without spawning anything.
        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
            let mut record = self.commands_recorded.lock().expect("poisoned");

            record.push(TestCommand {
                program: command.get_program().display().to_string(),
                args: command
                    .get_args()
                    .map(|a| a.display().to_string())
                    .collect(),
            });

            // No process ran; the default ExitStatus stands in for a clean run
            // — NOTE(review): confirm `ExitStatus::default()` means success here.
            Ok(Output {
                status: ExitStatus::default(),
                stdout: vec![],
                stderr: vec![],
            })
        }
    }
6051
6052    fn fake_http_client() -> Arc<dyn HttpClient> {
6053        FakeHttpClient::create(|request| async move {
6054            let (parts, _body) = request.into_parts();
6055            if parts.uri.path() == "/token" {
6056                let token_response = TokenResponse {
6057                    token: "token".to_string(),
6058                };
6059                return Ok(http::Response::builder()
6060                    .status(200)
6061                    .body(http_client::AsyncBody::from(
6062                        serde_json_lenient::to_string(&token_response).unwrap(),
6063                    ))
6064                    .unwrap());
6065            }
6066
6067            // OCI specific things
6068            if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
6069                let response = r#"
6070                    {
6071                        "schemaVersion": 2,
6072                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
6073                        "config": {
6074                            "mediaType": "application/vnd.devcontainers",
6075                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6076                            "size": 2
6077                        },
6078                        "layers": [
6079                            {
6080                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6081                                "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
6082                                "size": 59392,
6083                                "annotations": {
6084                                    "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
6085                                }
6086                            }
6087                        ],
6088                        "annotations": {
6089                            "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6090                            "com.github.package.type": "devcontainer_feature"
6091                        }
6092                    }
6093                    "#;
6094                return Ok(http::Response::builder()
6095                    .status(200)
6096                    .body(http_client::AsyncBody::from(response))
6097                    .unwrap());
6098            }
6099
6100            if parts.uri.path()
6101                == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
6102            {
6103                let response = build_tarball(vec![
6104                    ("./NOTES.md", r#"
6105                        ## Limitations
6106
6107                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
6108                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
6109                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
6110                          ```
6111                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
6112                          ```
6113                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
6114
6115
6116                        ## OS Support
6117
6118                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
6119
6120                        Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
6121
6122                        `bash` is required to execute the `install.sh` script."#),
6123                    ("./README.md", r#"
6124                        # Docker (Docker-in-Docker) (docker-in-docker)
6125
6126                        Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
6127
6128                        ## Example Usage
6129
6130                        ```json
6131                        "features": {
6132                            "ghcr.io/devcontainers/features/docker-in-docker:2": {}
6133                        }
6134                        ```
6135
6136                        ## Options
6137
6138                        | Options Id | Description | Type | Default Value |
6139                        |-----|-----|-----|-----|
6140                        | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
6141                        | moby | Install OSS Moby build instead of Docker CE | boolean | true |
6142                        | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
6143                        | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
6144                        | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
6145                        | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
6146                        | installDockerBuildx | Install Docker Buildx | boolean | true |
6147                        | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
6148                        | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
6149
6150                        ## Customizations
6151
6152                        ### VS Code Extensions
6153
6154                        - `ms-azuretools.vscode-containers`
6155
6156                        ## Limitations
6157
6158                        This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
6159                        * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
6160                        * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
6161                          ```
6162                          FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
6163                          ```
6164                          See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
6165
6166
6167                        ## OS Support
6168
6169                        This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
6170
6171                        `bash` is required to execute the `install.sh` script.
6172
6173
6174                        ---
6175
6176                        _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json).  Add additional notes to a `NOTES.md`._"#),
6177                    ("./devcontainer-feature.json", r#"
6178                        {
6179                          "id": "docker-in-docker",
6180                          "version": "2.16.1",
6181                          "name": "Docker (Docker-in-Docker)",
6182                          "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
6183                          "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
6184                          "options": {
6185                            "version": {
6186                              "type": "string",
6187                              "proposals": [
6188                                "latest",
6189                                "none",
6190                                "20.10"
6191                              ],
6192                              "default": "latest",
6193                              "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
6194                            },
6195                            "moby": {
6196                              "type": "boolean",
6197                              "default": true,
6198                              "description": "Install OSS Moby build instead of Docker CE"
6199                            },
6200                            "mobyBuildxVersion": {
6201                              "type": "string",
6202                              "default": "latest",
6203                              "description": "Install a specific version of moby-buildx when using Moby"
6204                            },
6205                            "dockerDashComposeVersion": {
6206                              "type": "string",
6207                              "enum": [
6208                                "none",
6209                                "v1",
6210                                "v2"
6211                              ],
6212                              "default": "v2",
6213                              "description": "Default version of Docker Compose (v1, v2 or none)"
6214                            },
6215                            "azureDnsAutoDetection": {
6216                              "type": "boolean",
6217                              "default": true,
6218                              "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
6219                            },
6220                            "dockerDefaultAddressPool": {
6221                              "type": "string",
6222                              "default": "",
6223                              "proposals": [],
6224                              "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
6225                            },
6226                            "installDockerBuildx": {
6227                              "type": "boolean",
6228                              "default": true,
6229                              "description": "Install Docker Buildx"
6230                            },
6231                            "installDockerComposeSwitch": {
6232                              "type": "boolean",
6233                              "default": false,
6234                              "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
6235                            },
6236                            "disableIp6tables": {
6237                              "type": "boolean",
6238                              "default": false,
6239                              "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
6240                            }
6241                          },
6242                          "entrypoint": "/usr/local/share/docker-init.sh",
6243                          "privileged": true,
6244                          "containerEnv": {
6245                            "DOCKER_BUILDKIT": "1"
6246                          },
6247                          "customizations": {
6248                            "vscode": {
6249                              "extensions": [
6250                                "ms-azuretools.vscode-containers"
6251                              ],
6252                              "settings": {
6253                                "github.copilot.chat.codeGeneration.instructions": [
6254                                  {
6255                                    "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
6256                                  }
6257                                ]
6258                              }
6259                            }
6260                          },
6261                          "mounts": [
6262                            {
6263                              "source": "dind-var-lib-docker-${devcontainerId}",
6264                              "target": "/var/lib/docker",
6265                              "type": "volume"
6266                            }
6267                          ],
6268                          "installsAfter": [
6269                            "ghcr.io/devcontainers/features/common-utils"
6270                          ]
6271                        }"#),
6272                    ("./install.sh", r#"
6273                    #!/usr/bin/env bash
6274                    #-------------------------------------------------------------------------------------------------------------
6275                    # Copyright (c) Microsoft Corporation. All rights reserved.
6276                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6277                    #-------------------------------------------------------------------------------------------------------------
6278                    #
6279                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
6280                    # Maintainer: The Dev Container spec maintainers
6281
6282
6283                    DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
6284                    USE_MOBY="${MOBY:-"true"}"
6285                    MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
6286                    DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
6287                    AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
6288                    DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
6289                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6290                    INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
6291                    INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
6292                    MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
6293                    MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
6294                    DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
6295                    DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
6296                    DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
6297
6298                    # Default: Exit on any failure.
6299                    set -e
6300
6301                    # Clean up
6302                    rm -rf /var/lib/apt/lists/*
6303
6304                    # Setup STDERR.
6305                    err() {
6306                        echo "(!) $*" >&2
6307                    }
6308
6309                    if [ "$(id -u)" -ne 0 ]; then
6310                        err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6311                        exit 1
6312                    fi
6313
6314                    ###################
6315                    # Helper Functions
6316                    # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
6317                    ###################
6318
6319                    # Determine the appropriate non-root user
6320                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6321                        USERNAME=""
6322                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6323                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6324                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6325                                USERNAME=${CURRENT_USER}
6326                                break
6327                            fi
6328                        done
6329                        if [ "${USERNAME}" = "" ]; then
6330                            USERNAME=root
6331                        fi
6332                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6333                        USERNAME=root
6334                    fi
6335
6336                    # Package manager update function
6337                    pkg_mgr_update() {
6338                        case ${ADJUSTED_ID} in
6339                            debian)
6340                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6341                                    echo "Running apt-get update..."
6342                                    apt-get update -y
6343                                fi
6344                                ;;
6345                            rhel)
6346                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6347                                    cache_check_dir="/var/cache/yum"
6348                                else
6349                                    cache_check_dir="/var/cache/${PKG_MGR_CMD}"
6350                                fi
6351                                if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
6352                                    echo "Running ${PKG_MGR_CMD} makecache ..."
6353                                    ${PKG_MGR_CMD} makecache
6354                                fi
6355                                ;;
6356                        esac
6357                    }
6358
6359                    # Checks if packages are installed and installs them if not
6360                    check_packages() {
6361                        case ${ADJUSTED_ID} in
6362                            debian)
6363                                if ! dpkg -s "$@" > /dev/null 2>&1; then
6364                                    pkg_mgr_update
6365                                    apt-get -y install --no-install-recommends "$@"
6366                                fi
6367                                ;;
6368                            rhel)
6369                                if ! rpm -q "$@" > /dev/null 2>&1; then
6370                                    pkg_mgr_update
6371                                    ${PKG_MGR_CMD} -y install "$@"
6372                                fi
6373                                ;;
6374                        esac
6375                    }
6376
6377                    # Figure out correct version of a three part version number is not passed
6378                    find_version_from_git_tags() {
6379                        local variable_name=$1
6380                        local requested_version=${!variable_name}
6381                        if [ "${requested_version}" = "none" ]; then return; fi
6382                        local repository=$2
6383                        local prefix=${3:-"tags/v"}
6384                        local separator=${4:-"."}
6385                        local last_part_optional=${5:-"false"}
6386                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6387                            local escaped_separator=${separator//./\\.}
6388                            local last_part
6389                            if [ "${last_part_optional}" = "true" ]; then
6390                                last_part="(${escaped_separator}[0-9]+)?"
6391                            else
6392                                last_part="${escaped_separator}[0-9]+"
6393                            fi
6394                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6395                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6396                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6397                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6398                            else
6399                                set +e
6400                                    declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6401                                set -e
6402                            fi
6403                        fi
6404                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6405                            err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6406                            exit 1
6407                        fi
6408                        echo "${variable_name}=${!variable_name}"
6409                    }
6410
6411                    # Use semver logic to decrement a version number then look for the closest match
6412                    find_prev_version_from_git_tags() {
6413                        local variable_name=$1
6414                        local current_version=${!variable_name}
6415                        local repository=$2
6416                        # Normally a "v" is used before the version number, but support alternate cases
6417                        local prefix=${3:-"tags/v"}
6418                        # Some repositories use "_" instead of "." for version number part separation, support that
6419                        local separator=${4:-"."}
6420                        # Some tools release versions that omit the last digit (e.g. go)
6421                        local last_part_optional=${5:-"false"}
6422                        # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
6423                        local version_suffix_regex=$6
6424                        # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6425                        set +e
6426                            major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
6427                            minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6428                            breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6429
6430                            if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
6431                                ((major=major-1))
6432                                declare -g ${variable_name}="${major}"
6433                                # Look for latest version from previous major release
6434                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
6435                            # Handle situations like Go's odd version pattern where "0" releases omit the last part
6436                            elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6437                                ((minor=minor-1))
6438                                declare -g ${variable_name}="${major}.${minor}"
6439                                # Look for latest version from previous minor release
6440                                find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
6441                            else
6442                                ((breakfix=breakfix-1))
6443                                if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
6444                                    declare -g ${variable_name}="${major}.${minor}"
6445                                else
6446                                    declare -g ${variable_name}="${major}.${minor}.${breakfix}"
6447                                fi
6448                            fi
6449                        set -e
6450                    }
6451
6452                    # Function to fetch the version released prior to the latest version
6453                    get_previous_version() {
6454                        local url=$1
6455                        local repo_url=$2
6456                        local variable_name=$3
6457                        prev_version=${!variable_name}
6458
6459                        output=$(curl -s "$repo_url");
6460                        if echo "$output" | jq -e 'type == "object"' > /dev/null; then
6461                          message=$(echo "$output" | jq -r '.message')
6462
6463                          if [[ $message == "API rate limit exceeded"* ]]; then
6464                                echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
6465                                echo -e "\nAttempting to find latest version using GitHub tags."
6466                                find_prev_version_from_git_tags prev_version "$url" "tags/v"
6467                                declare -g ${variable_name}="${prev_version}"
6468                           fi
6469                        elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
6470                            echo -e "\nAttempting to find latest version using GitHub Api."
6471                            version=$(echo "$output" | jq -r '.[1].tag_name')
6472                            declare -g ${variable_name}="${version#v}"
6473                        fi
6474                        echo "${variable_name}=${!variable_name}"
6475                    }
6476
6477                    get_github_api_repo_url() {
6478                        local url=$1
6479                        echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
6480                    }
6481
6482                    ###########################################
6483                    # Start docker-in-docker installation
6484                    ###########################################
6485
6486                    # Ensure apt is in non-interactive to avoid prompts
6487                    export DEBIAN_FRONTEND=noninteractive
6488
6489                    # Source /etc/os-release to get OS info
6490                    . /etc/os-release
6491
6492                    # Determine adjusted ID and package manager
6493                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6494                        ADJUSTED_ID="debian"
6495                        PKG_MGR_CMD="apt-get"
6496                        # Use dpkg for Debian-based systems
6497                        architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
6498                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
6499                        ADJUSTED_ID="rhel"
6500                        # Determine the appropriate package manager for RHEL-based systems
6501                        for pkg_mgr in tdnf dnf microdnf yum; do
6502                            if command -v "$pkg_mgr" >/dev/null 2>&1; then
6503                                PKG_MGR_CMD="$pkg_mgr"
6504                                break
6505                            fi
6506                        done
6507
6508                        if [ -z "${PKG_MGR_CMD}" ]; then
6509                            err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
6510                            exit 1
6511                        fi
6512
6513                        architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
6514                    else
6515                        err "Linux distro ${ID} not supported."
6516                        exit 1
6517                    fi
6518
6519                    # Azure Linux specific setup
6520                    if [ "${ID}" = "azurelinux" ]; then
6521                        VERSION_CODENAME="azurelinux${VERSION_ID}"
6522                    fi
6523
6524                    # Prevent attempting to install Moby on Debian trixie (packages removed)
6525                    if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
6526                        err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
6527                        err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
6528                        exit 1
6529                    fi
6530
                    # Check if distro is supported
                    # Debian-family distros are gated against an allow-list of codenames;
                    # RHEL-family distros only get informational messages here.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            # Substring match: the codename must appear in the supported list
                            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Moby installation  - setting up Microsoft repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then
                            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
                            else
                                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
                            fi
                        fi
                    else
                        if [ "${ADJUSTED_ID}" = "debian" ]; then
                            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
                                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
                                exit 1
                            fi
                            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
                        elif [ "${ADJUSTED_ID}" = "rhel" ]; then

                            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
                        fi
                    fi
6560
                    # Install base dependencies
                    # check_packages is defined earlier in the script; presumably it installs
                    # any listed packages that are not yet present - TODO confirm semantics.
                    base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
                    case ${ADJUSTED_ID} in
                        debian)
                            check_packages apt-transport-https $base_packages dirmngr
                            ;;
                        rhel)
                            check_packages $base_packages tar gawk shadow-utils policycoreutils  procps-ng systemd-libs systemd-devel

                            ;;
                    esac

                    # Install git if not already present
                    if ! command -v git >/dev/null 2>&1; then
                        check_packages git
                    fi

                    # Update CA certificates to ensure HTTPS connections work properly
                    # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
                    # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
                    if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
                        update-ca-certificates
                    fi

                    # Swap to legacy iptables for compatibility (Debian only)
                    if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
                        update-alternatives --set iptables /usr/sbin/iptables-legacy
                        update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
                    fi
6590
                    # Set up the necessary repositories
                    # (GPG keys + repo definitions only; package installation happens later).
                    # NOTE(review): the heredoc terminators below sit at the script's base
                    # indent - presumably a common indent is stripped when this embedded
                    # script is emitted, so EOF lands at column 0; confirm the dedent logic.
                    if [ "${USE_MOBY}" = "true" ]; then
                        # Name of open source engine/cli
                        engine_package_name="moby-engine"
                        cli_package_name="moby-cli"

                        case ${ADJUSTED_ID} in
                            debian)
                                # Import key safely and import Microsoft apt repo
                                {
                                    curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                                    curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
                                } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
                                echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
                                ;;
                            rhel)
                                echo "(*) ${ID} detected - checking for Moby packages..."

                                # Check if moby packages are available in default repos
                                if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                    echo "(*) Using built-in ${ID} Moby packages"
                                else
                                    case "${ID}" in
                                        azurelinux)
                                            echo "(*) Moby packages not found in Azure Linux repositories"
                                            echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                                            err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                            exit 1
                                            ;;
                                        mariner)
                                            echo "(*) Adding Microsoft repository for CBL-Mariner..."
                                            # Add Microsoft repository if packages aren't available locally
                                            curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                                            cat > /etc/yum.repos.d/microsoft.repo << EOF
                    [microsoft]
                    name=Microsoft Repository
                    baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
                    EOF
                                    # NOTE(review): the lines below are still inside the 'mariner'
                                    # case branch - the indentation after the heredoc is misleading.
                                    # Verify packages are available after adding repo
                                    pkg_mgr_update
                                    if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                                        echo "(*) Moby packages not found in Microsoft repository either"
                                        err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                                        exit 1
                                    fi
                                    ;;
                                *)
                                    err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                                    exit 1
                                    ;;
                                esac
                            fi
                            ;;
                        esac
                    else
                        # Name of licensed engine/cli
                        engine_package_name="docker-ce"
                        cli_package_name="docker-ce-cli"
                        case ${ADJUSTED_ID} in
                            debian)
                                curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
                                echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
                                ;;
                            rhel)
                                # Docker CE repository setup for RHEL-based systems
                                setup_docker_ce_repo() {
                                    curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                                    cat > /etc/yum.repos.d/docker-ce.repo << EOF
                    [docker-ce-stable]
                    name=Docker CE Stable
                    baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
                    enabled=1
                    gpgcheck=1
                    gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
                    skip_if_unavailable=1
                    module_hotfixes=1
                    EOF
                                }
                                # Best-effort install of libraries Docker CE needs on Azure Linux /
                                # Mariner; failures are logged and tolerated.
                                install_azure_linux_deps() {
                                    echo "(*) Installing device-mapper libraries for Docker CE..."
                                    [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                                    echo "(*) Installing additional Docker CE dependencies..."
                                    ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                                        echo "(*) Some optional dependencies could not be installed, continuing..."
                                    }
                                }
                                # Append a minimal SELinux file-context entry for /var/lib/docker
                                # when SELinux is enabled (container-selinux is not installed here).
                                setup_selinux_context() {
                                    if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                                        echo "(*) Creating minimal SELinux context for Docker compatibility..."
                                        mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                                        echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                                    fi
                                }

                                # Special handling for RHEL Docker CE installation
                                case "${ID}" in
                                    azurelinux|mariner)
                                        echo "(*) ${ID} detected"
                                        echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                                        echo "(*) Setting up Docker CE repository..."

                                        setup_docker_ce_repo
                                        install_azure_linux_deps

                                        if [ "${USE_MOBY}" != "true" ]; then
                                            echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                                            echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                                            setup_selinux_context
                                        else
                                            echo "(*) Using Moby - container-selinux not required"
                                        fi
                                        ;;
                                    *)
                                        # Standard RHEL/CentOS/Fedora approach
                                        if command -v dnf >/dev/null 2>&1; then
                                            dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        elif command -v yum-config-manager >/dev/null 2>&1; then
                                            yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                                        else
                                            # Manual fallback
                                            setup_docker_ce_repo
                                # NOTE(review): the outdented fi/;; below close the if and the '*)'
                                # branch above - syntactically correct, indentation is misleading.
                                fi
                                ;;
                            esac
                            ;;
                        esac
                    fi
6723
6724                    # Refresh package database
6725                    case ${ADJUSTED_ID} in
6726                        debian)
6727                            apt-get update
6728                            ;;
6729                        rhel)
6730                            pkg_mgr_update
6731                            ;;
6732                    esac
6733
                    # Soft version matching
                    # Resolve DOCKER_VERSION to concrete package-version suffixes. On Debian
                    # the suffix includes a leading "=" (appended as pkg${suffix}); on RHEL a
                    # leading "-" is used instead. Empty suffixes mean "latest".
                    if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
                        # Empty, meaning grab whatever "latest" is in apt repo
                        engine_version_suffix=""
                        cli_version_suffix=""
                    else
                        case ${ADJUSTED_ID} in
                            debian)
                        # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
                        docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
                        docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
                        # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
                        docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                        set +e # Don't exit if finding version fails - will handle gracefully
                            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
                        set -e
                        # A bare "=" means the grep found nothing - treat as no match
                        if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                            err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                            apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                            exit 1
                        fi
                        ;;
                    rhel)
                         # For RHEL-based systems, use dnf/yum to find versions
                                docker_version_escaped="${DOCKER_VERSION//./\\.}"
                                set +e # Don't exit if finding version fails - will handle gracefully
                                    if [ "${USE_MOBY}" = "true" ]; then
                                        available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    else
                                        available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
                                    fi
                                set -e
                                # Unlike the debian branch, a miss here is non-fatal: fall back to latest
                                if [ -n "${available_versions}" ]; then
                                    engine_version_suffix="-${available_versions}"
                                    cli_version_suffix="-${available_versions}"
                                else
                                    echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                                    engine_version_suffix=""
                                    cli_version_suffix=""
                                fi
                                ;;
                        esac
                    fi
6778
                    # Version matching for moby-buildx
                    # Same strategy as the docker version matching above, but for the
                    # moby-buildx package; only relevant when installing Moby.
                    if [ "${USE_MOBY}" = "true" ]; then
                        if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
                            # Empty, meaning grab whatever "latest" is in apt repo
                            buildx_version_suffix=""
                        else
                            case ${ADJUSTED_ID} in
                                debian)
                            buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                            buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                            buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                            set +e
                                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                            set -e
                            if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                                err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                                apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                                exit 1
                            fi
                            ;;
                                rhel)
                                    # For RHEL-based systems, try to find buildx version or use latest
                                    buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                                    set +e
                                    available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                                    set -e
                                    if [ -n "${available_buildx}" ]; then
                                        buildx_version_suffix="-${available_buildx}"
                                    else
                                        echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                                        buildx_version_suffix=""
                                    fi
                                    ;;
                            esac
                            # Informational: surface the resolved suffix in the build log
                            echo "buildx_version_suffix ${buildx_version_suffix}"
                        fi
                    fi
6816
6817                    # Install Docker / Moby CLI if not already installed
6818                    if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
6819                        echo "Docker / Moby CLI and Engine already installed."
6820                    else
6821                            case ${ADJUSTED_ID} in
6822                            debian)
6823                                if [ "${USE_MOBY}" = "true" ]; then
6824                                    # Install engine
6825                                    set +e # Handle error gracefully
6826                                        apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
6827                                        exit_code=$?
6828                                    set -e
6829
6830                                    if [ ${exit_code} -ne 0 ]; then
6831                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
6832                                        exit 1
6833                                    fi
6834
6835                                    # Install compose
6836                                    apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6837                                else
6838                                    apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
6839                                    # Install compose
6840                                    apt-mark hold docker-ce docker-ce-cli
6841                                    apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6842                                fi
6843                                ;;
6844                            rhel)
6845                                if [ "${USE_MOBY}" = "true" ]; then
6846                                    set +e # Handle error gracefully
6847                                        ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
6848                                        exit_code=$?
6849                                    set -e
6850
6851                                    if [ ${exit_code} -ne 0 ]; then
6852                                        err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
6853                                        exit 1
6854                                    fi
6855
6856                                    # Install compose
6857                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6858                                        ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6859                                    fi
6860                                else
6861                                                   # Special handling for Azure Linux Docker CE installation
6862                                    if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
6863                                        echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
6864
6865                                        # Use rpm with --force and --nodeps for Azure Linux
6866                                        set +e  # Don't exit on error for this section
6867                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
6868                                        install_result=$?
6869                                        set -e
6870
6871                                        if [ $install_result -ne 0 ]; then
6872                                            echo "(*) Standard installation failed, trying manual installation..."
6873
6874                                            echo "(*) Standard installation failed, trying manual installation..."
6875
6876                                            # Create directory for downloading packages
6877                                            mkdir -p /tmp/docker-ce-install
6878
6879                                            # Download packages manually using curl since tdnf doesn't support download
6880                                            echo "(*) Downloading Docker CE packages manually..."
6881
6882                                            # Get the repository baseurl
6883                                            repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
6884
6885                                            # Download packages directly
6886                                            cd /tmp/docker-ce-install
6887
6888                                            # Get package names with versions
6889                                            if [ -n "${cli_version_suffix}" ]; then
6890                                                docker_ce_version="${cli_version_suffix#-}"
6891                                                docker_cli_version="${engine_version_suffix#-}"
6892                                            else
6893                                                # Get latest version from repository
6894                                                docker_ce_version="latest"
6895                                            fi
6896
6897                                            echo "(*) Attempting to download Docker CE packages from repository..."
6898
6899                                            # Try to download latest packages if specific version fails
6900                                            if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
6901                                                # Fallback: try to get latest available version
6902                                                echo "(*) Specific version not found, trying latest..."
6903                                                latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6904                                                latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6905                                                latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
6906
6907                                                if [ -n "${latest_docker}" ]; then
6908                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
6909                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
6910                                                    curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
6911                                                else
6912                                                    echo "(*) ERROR: Could not find Docker CE packages in repository"
6913                                                    echo "(*) Please check repository configuration or use 'moby': true"
6914                                                    exit 1
6915                                                fi
6916                                            fi
6917                                            # Install systemd libraries required by Docker CE
6918                                            echo "(*) Installing systemd libraries required by Docker CE..."
6919                                            ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
6920                                                echo "(*) WARNING: Could not install systemd libraries"
6921                                                echo "(*) Docker may fail to start without these"
6922                                            }
6923
6924                                            # Install with rpm --force --nodeps
6925                                            echo "(*) Installing Docker CE packages with dependency override..."
6926                                            rpm -Uvh --force --nodeps *.rpm
6927
6928                                            # Cleanup
6929                                            cd /
6930                                            rm -rf /tmp/docker-ce-install
6931
6932                                            echo "(*) Docker CE installation completed with dependency bypass"
6933                                            echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
6934                                        fi
6935                                    else
6936                                        # Standard installation for other RHEL-based systems
6937                                        ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
6938                                    fi
6939                                    # Install compose
6940                                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6941                                        ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
6942                                    fi
6943                                fi
6944                                ;;
6945                        esac
6946                    fi
6947
6948                    echo "Finished installing docker / moby!"
6949
6950                    docker_home="/usr/libexec/docker"
6951                    cli_plugins_dir="${docker_home}/cli-plugins"
6952
6953                    # fallback for docker-compose
6954                    fallback_compose(){
6955                        local url=$1
6956                        local repo_url=$(get_github_api_repo_url "$url")
6957                        echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
6958                        get_previous_version "${url}" "${repo_url}" compose_version
6959                        echo -e "\nAttempting to install v${compose_version}"
6960                        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
6961                    }
6962
6963                    # If 'docker-compose' command is to be included
6964                    if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
6965                        case "${architecture}" in
6966                        amd64|x86_64) target_compose_arch=x86_64 ;;
6967                        arm64|aarch64) target_compose_arch=aarch64 ;;
6968                        *)
6969                            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6970                            exit 1
6971                        esac
6972
6973                        docker_compose_path="/usr/local/bin/docker-compose"
6974                        if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
6975                            err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
6976                            INSTALL_DOCKER_COMPOSE_SWITCH="false"
6977
6978                            if [ "${target_compose_arch}" = "x86_64" ]; then
6979                                echo "(*) Installing docker compose v1..."
6980                                curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
6981                                chmod +x ${docker_compose_path}
6982
6983                                # Download the SHA256 checksum
6984                                DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
6985                                echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
6986                                sha256sum -c docker-compose.sha256sum --ignore-missing
6987                            elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
6988                                err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
6989                                exit 1
6990                            else
6991                                # Use pip to get a version that runs on this architecture
6992                                check_packages python3-minimal python3-pip libffi-dev python3-venv
6993                                echo "(*) Installing docker compose v1 via pip..."
6994                                export PYTHONUSERBASE=/usr/local
6995                                pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
6996                            fi
6997                        else
6998                            compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
6999                            docker_compose_url="https://github.com/docker/compose"
7000                            find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
7001                            echo "(*) Installing docker-compose ${compose_version}..."
7002                            curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
7003                                     echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
7004                                     fallback_compose "$docker_compose_url"
7005                            }
7006
7007                            chmod +x ${docker_compose_path}
7008
7009                            # Download the SHA256 checksum
7010                            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
7011                            echo "${DOCKER_COMPOSE_SHA256}  ${docker_compose_path}" > docker-compose.sha256sum
7012                            sha256sum -c docker-compose.sha256sum --ignore-missing
7013
7014                            mkdir -p ${cli_plugins_dir}
7015                            cp ${docker_compose_path} ${cli_plugins_dir}
7016                        fi
7017                    fi
7018
7019                    # fallback method for compose-switch
7020                    fallback_compose-switch() {
7021                        local url=$1
7022                        local repo_url=$(get_github_api_repo_url "$url")
7023                        echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
7024                        get_previous_version "$url" "$repo_url" compose_switch_version
7025                        echo -e "\nAttempting to install v${compose_switch_version}"
7026                        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
7027                    }
7028                    # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
7029                    if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
7030                        if type docker-compose > /dev/null 2>&1; then
7031                            echo "(*) Installing compose-switch..."
7032                            current_compose_path="$(command -v docker-compose)"
7033                            target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
7034                            compose_switch_version="latest"
7035                            compose_switch_url="https://github.com/docker/compose-switch"
7036                            # Try to get latest version, fallback to known stable version if GitHub API fails
7037                            set +e
7038                            find_version_from_git_tags compose_switch_version "$compose_switch_url"
7039                            if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
7040                                echo "(*) GitHub API rate limited or failed, using fallback method"
7041                                fallback_compose-switch "$compose_switch_url"
7042                            fi
7043                            set -e
7044
7045                            # Map architecture for compose-switch downloads
7046                            case "${architecture}" in
7047                                amd64|x86_64) target_switch_arch=amd64 ;;
7048                                arm64|aarch64) target_switch_arch=arm64 ;;
7049                                *) target_switch_arch=${architecture} ;;
7050                            esac
7051                            curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
7052                            chmod +x /usr/local/bin/compose-switch
7053                            # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
7054                            # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
7055                            mv "${current_compose_path}" "${target_compose_path}"
7056                            update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
7057                            update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
7058                        else
7059                            err "Skipping installation of compose-switch as docker compose is unavailable..."
7060                        fi
7061                    fi
7062
7063                    # If init file already exists, exit
7064                    if [ -f "/usr/local/share/docker-init.sh" ]; then
7065                        echo "/usr/local/share/docker-init.sh already exists, so exiting."
7066                        # Clean up
7067                        rm -rf /var/lib/apt/lists/*
7068                        exit 0
7069                    fi
7070                    echo "docker-init doesn't exist, adding..."
7071
7072                    if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
7073                            groupadd -r docker
7074                    fi
7075
7076                    usermod -aG docker ${USERNAME}
7077
7078                    # fallback for docker/buildx
7079                    fallback_buildx() {
7080                        local url=$1
7081                        local repo_url=$(get_github_api_repo_url "$url")
7082                        echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
7083                        get_previous_version "$url" "$repo_url" buildx_version
7084                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
7085                        echo -e "\nAttempting to install v${buildx_version}"
7086                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
7087                    }
7088
7089                    if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
7090                        buildx_version="latest"
7091                        docker_buildx_url="https://github.com/docker/buildx"
7092                        find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
7093                        echo "(*) Installing buildx ${buildx_version}..."
7094
7095                          # Map architecture for buildx downloads
7096                        case "${architecture}" in
7097                            amd64|x86_64) target_buildx_arch=amd64 ;;
7098                            arm64|aarch64) target_buildx_arch=arm64 ;;
7099                            *) target_buildx_arch=${architecture} ;;
7100                        esac
7101
7102                        buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
7103
7104                        cd /tmp
7105                        wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
7106
7107                        docker_home="/usr/libexec/docker"
7108                        cli_plugins_dir="${docker_home}/cli-plugins"
7109
7110                        mkdir -p ${cli_plugins_dir}
7111                        mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
7112                        chmod +x ${cli_plugins_dir}/docker-buildx
7113
7114                        chown -R "${USERNAME}:docker" "${docker_home}"
7115                        chmod -R g+r+w "${docker_home}"
7116                        find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
7117                    fi
7118
7119                    DOCKER_DEFAULT_IP6_TABLES=""
7120                    if [ "$DISABLE_IP6_TABLES" == true ]; then
7121                        requested_version=""
7122                        # checking whether the version requested either is in semver format or just a number denoting the major version
7123                        # and, extracting the major version number out of the two scenarios
7124                        semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
7125                        if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
7126                            requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
7127                        elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
7128                            requested_version=$DOCKER_VERSION
7129                        fi
7130                        if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
7131                            DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
7132                            echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
7133                        fi
7134                    fi
7135
7136                    if [ ! -d /usr/local/share ]; then
7137                        mkdir -p /usr/local/share
7138                    fi
7139
7140                    tee /usr/local/share/docker-init.sh > /dev/null \
7141                    << EOF
7142                    #!/bin/sh
7143                    #-------------------------------------------------------------------------------------------------------------
7144                    # Copyright (c) Microsoft Corporation. All rights reserved.
7145                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7146                    #-------------------------------------------------------------------------------------------------------------
7147
7148                    set -e
7149
7150                    AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
7151                    DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
7152                    DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
7153                    EOF
7154
7155                    tee -a /usr/local/share/docker-init.sh > /dev/null \
7156                    << 'EOF'
7157                    dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
7158                        # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
7159                        find /run /var/run -iname 'docker*.pid' -delete || :
7160                        find /run /var/run -iname 'container*.pid' -delete || :
7161
7162                        # -- Start: dind wrapper script --
7163                        # Maintained: https://github.com/moby/moby/blob/master/hack/dind
7164
7165                        export container=docker
7166
7167                        if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
7168                            mount -t securityfs none /sys/kernel/security || {
7169                                echo >&2 'Could not mount /sys/kernel/security.'
7170                                echo >&2 'AppArmor detection and --privileged mode might break.'
7171                            }
7172                        fi
7173
7174                        # Mount /tmp (conditionally)
7175                        if ! mountpoint -q /tmp; then
7176                            mount -t tmpfs none /tmp
7177                        fi
7178
7179                        set_cgroup_nesting()
7180                        {
7181                            # cgroup v2: enable nesting
7182                            if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
7183                                # move the processes from the root group to the /init group,
7184                                # otherwise writing subtree_control fails with EBUSY.
7185                                # An error during moving non-existent process (i.e., "cat") is ignored.
7186                                mkdir -p /sys/fs/cgroup/init
7187                                xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
7188                                # enable controllers
7189                                sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
7190                                    > /sys/fs/cgroup/cgroup.subtree_control
7191                            fi
7192                        }
7193
7194                        # Set cgroup nesting, retrying if necessary
7195                        retry_cgroup_nesting=0
7196
7197                        until [ "${retry_cgroup_nesting}" -eq "5" ];
7198                        do
7199                            set +e
7200                                set_cgroup_nesting
7201
7202                                if [ $? -ne 0 ]; then
7203                                    echo "(*) cgroup v2: Failed to enable nesting, retrying..."
7204                                else
7205                                    break
7206                                fi
7207
7208                                retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
7209                            set -e
7210                        done
7211
7212                        # -- End: dind wrapper script --
7213
7214                        # Handle DNS
7215                        set +e
7216                            cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
7217                            if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
7218                            then
7219                                echo "Setting dockerd Azure DNS."
7220                                CUSTOMDNS="--dns 168.63.129.16"
7221                            else
7222                                echo "Not setting dockerd DNS manually."
7223                                CUSTOMDNS=""
7224                            fi
7225                        set -e
7226
7227                        if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
7228                        then
7229                            DEFAULT_ADDRESS_POOL=""
7230                        else
7231                            DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
7232                        fi
7233
7234                        # Start docker/moby engine
7235                        ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
7236                    INNEREOF
7237                    )"
7238
7239                    sudo_if() {
7240                        COMMAND="$*"
7241
7242                        if [ "$(id -u)" -ne 0 ]; then
7243                            sudo $COMMAND
7244                        else
7245                            $COMMAND
7246                        fi
7247                    }
7248
7249                    retry_docker_start_count=0
7250                    docker_ok="false"
7251
7252                    until [ "${docker_ok}" = "true"  ] || [ "${retry_docker_start_count}" -eq "5" ];
7253                    do
7254                        # Start using sudo if not invoked as root
7255                        if [ "$(id -u)" -ne 0 ]; then
7256                            sudo /bin/sh -c "${dockerd_start}"
7257                        else
7258                            eval "${dockerd_start}"
7259                        fi
7260
7261                        retry_count=0
7262                        until [ "${docker_ok}" = "true"  ] || [ "${retry_count}" -eq "5" ];
7263                        do
7264                            sleep 1s
7265                            set +e
7266                                docker info > /dev/null 2>&1 && docker_ok="true"
7267                            set -e
7268
7269                            retry_count=`expr $retry_count + 1`
7270                        done
7271
7272                        if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
7273                            echo "(*) Failed to start docker, retrying..."
7274                            set +e
7275                                sudo_if pkill dockerd
7276                                sudo_if pkill containerd
7277                            set -e
7278                        fi
7279
7280                        retry_docker_start_count=`expr $retry_docker_start_count + 1`
7281                    done
7282
7283                    # Execute whatever commands were passed in (if any). This allows us
7284                    # to set this script to ENTRYPOINT while still executing the default CMD.
7285                    exec "$@"
7286                    EOF
7287
7288                    chmod +x /usr/local/share/docker-init.sh
7289                    chown ${USERNAME}:root /usr/local/share/docker-init.sh
7290
7291                    # Clean up
7292                    rm -rf /var/lib/apt/lists/*
7293
7294                    echo 'docker-in-docker-debian script has completed!'"#),
7295                ]).await;
7296
                // Serve the assembled docker-in-docker feature tarball (built above via
                // `build_tarball`) as a successful OCI blob download.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
7302            if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
7303                let response = r#"
7304                    {
7305                        "schemaVersion": 2,
7306                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
7307                        "config": {
7308                            "mediaType": "application/vnd.devcontainers",
7309                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
7310                            "size": 2
7311                        },
7312                        "layers": [
7313                            {
7314                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
7315                                "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
7316                                "size": 20992,
7317                                "annotations": {
7318                                    "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
7319                                }
7320                            }
7321                        ],
7322                        "annotations": {
7323                            "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
7324                            "com.github.package.type": "devcontainer_feature"
7325                        }
7326                    }
7327                    "#;
7328
7329                return Ok(http::Response::builder()
7330                    .status(200)
7331                    .body(http_client::AsyncBody::from(response))
7332                    .unwrap());
7333            }
7334            if parts.uri.path()
7335                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
7336            {
7337                let response = build_tarball(vec![
7338                    ("./devcontainer-feature.json", r#"
7339                        {
7340                            "id": "go",
7341                            "version": "1.3.3",
7342                            "name": "Go",
7343                            "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
7344                            "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
7345                            "options": {
7346                                "version": {
7347                                    "type": "string",
7348                                    "proposals": [
7349                                        "latest",
7350                                        "none",
7351                                        "1.24",
7352                                        "1.23"
7353                                    ],
7354                                    "default": "latest",
7355                                    "description": "Select or enter a Go version to install"
7356                                },
7357                                "golangciLintVersion": {
7358                                    "type": "string",
7359                                    "default": "latest",
7360                                    "description": "Version of golangci-lint to install"
7361                                }
7362                            },
7363                            "init": true,
7364                            "customizations": {
7365                                "vscode": {
7366                                    "extensions": [
7367                                        "golang.Go"
7368                                    ],
7369                                    "settings": {
7370                                        "github.copilot.chat.codeGeneration.instructions": [
7371                                            {
7372                                                "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
7373                                            }
7374                                        ]
7375                                    }
7376                                }
7377                            },
7378                            "containerEnv": {
7379                                "GOROOT": "/usr/local/go",
7380                                "GOPATH": "/go",
7381                                "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
7382                            },
7383                            "capAdd": [
7384                                "SYS_PTRACE"
7385                            ],
7386                            "securityOpt": [
7387                                "seccomp=unconfined"
7388                            ],
7389                            "installsAfter": [
7390                                "ghcr.io/devcontainers/features/common-utils"
7391                            ]
7392                        }
7393                        "#),
7394                    ("./install.sh", r#"
7395                    #!/usr/bin/env bash
7396                    #-------------------------------------------------------------------------------------------------------------
7397                    # Copyright (c) Microsoft Corporation. All rights reserved.
7398                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
7399                    #-------------------------------------------------------------------------------------------------------------
7400                    #
7401                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
7402                    # Maintainer: The VS Code and Codespaces Teams
7403
7404                    TARGET_GO_VERSION="${VERSION:-"latest"}"
7405                    GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
7406
7407                    TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
7408                    TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
7409                    USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
7410                    INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
7411
7412                    # https://www.google.com/linuxrepositories/
7413                    GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
7414
7415                    set -e
7416
7417                    if [ "$(id -u)" -ne 0 ]; then
7418                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7419                        exit 1
7420                    fi
7421
7422                    # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
7423                    . /etc/os-release
7424                    # Get an adjusted ID independent of distro variants
7425                    MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
7426                    if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
7427                        ADJUSTED_ID="debian"
7428                    elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
7429                        ADJUSTED_ID="rhel"
7430                        if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
7431                            VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
7432                        else
7433                            VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
7434                        fi
7435                    else
7436                        echo "Linux distro ${ID} not supported."
7437                        exit 1
7438                    fi
7439
7440                    if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
7441                        # As of 1 July 2024, mirrorlist.centos.org no longer exists.
7442                        # Update the repo files to reference vault.centos.org.
7443                        sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
7444                        sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
7445                        sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
7446                    fi
7447
7448                    # Setup INSTALL_CMD & PKG_MGR_CMD
7449                    if type apt-get > /dev/null 2>&1; then
7450                        PKG_MGR_CMD=apt-get
7451                        INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
7452                    elif type microdnf > /dev/null 2>&1; then
7453                        PKG_MGR_CMD=microdnf
7454                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
7455                    elif type dnf > /dev/null 2>&1; then
7456                        PKG_MGR_CMD=dnf
7457                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
7458                    else
7459                        PKG_MGR_CMD=yum
7460                        INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
7461                    fi
7462
7463                    # Clean up
7464                    clean_up() {
7465                        case ${ADJUSTED_ID} in
7466                            debian)
7467                                rm -rf /var/lib/apt/lists/*
7468                                ;;
7469                            rhel)
7470                                rm -rf /var/cache/dnf/* /var/cache/yum/*
7471                                rm -rf /tmp/yum.log
7472                                rm -rf ${GPG_INSTALL_PATH}
7473                                ;;
7474                        esac
7475                    }
7476                    clean_up
7477
7478
7479                    # Figure out correct version of a three part version number is not passed
7480                    find_version_from_git_tags() {
7481                        local variable_name=$1
7482                        local requested_version=${!variable_name}
7483                        if [ "${requested_version}" = "none" ]; then return; fi
7484                        local repository=$2
7485                        local prefix=${3:-"tags/v"}
7486                        local separator=${4:-"."}
7487                        local last_part_optional=${5:-"false"}
7488                        if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
7489                            local escaped_separator=${separator//./\\.}
7490                            local last_part
7491                            if [ "${last_part_optional}" = "true" ]; then
7492                                last_part="(${escaped_separator}[0-9]+)?"
7493                            else
7494                                last_part="${escaped_separator}[0-9]+"
7495                            fi
7496                            local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
7497                            local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
7498                            if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
7499                                declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
7500                            else
7501                                set +e
7502                                declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
7503                                set -e
7504                            fi
7505                        fi
7506                        if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
7507                            echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
7508                            exit 1
7509                        fi
7510                        echo "${variable_name}=${!variable_name}"
7511                    }
7512
7513                    pkg_mgr_update() {
7514                        case $ADJUSTED_ID in
7515                            debian)
7516                                if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7517                                    echo "Running apt-get update..."
7518                                    ${PKG_MGR_CMD} update -y
7519                                fi
7520                                ;;
7521                            rhel)
7522                                if [ ${PKG_MGR_CMD} = "microdnf" ]; then
7523                                    if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
7524                                        echo "Running ${PKG_MGR_CMD} makecache ..."
7525                                        ${PKG_MGR_CMD} makecache
7526                                    fi
7527                                else
7528                                    if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
7529                                        echo "Running ${PKG_MGR_CMD} check-update ..."
7530                                        set +e
7531                                        ${PKG_MGR_CMD} check-update
7532                                        rc=$?
7533                                        if [ $rc != 0 ] && [ $rc != 100 ]; then
7534                                            exit 1
7535                                        fi
7536                                        set -e
7537                                    fi
7538                                fi
7539                                ;;
7540                        esac
7541                    }
7542
7543                    # Checks if packages are installed and installs them if not
7544                    check_packages() {
7545                        case ${ADJUSTED_ID} in
7546                            debian)
7547                                if ! dpkg -s "$@" > /dev/null 2>&1; then
7548                                    pkg_mgr_update
7549                                    ${INSTALL_CMD} "$@"
7550                                fi
7551                                ;;
7552                            rhel)
7553                                if ! rpm -q "$@" > /dev/null 2>&1; then
7554                                    pkg_mgr_update
7555                                    ${INSTALL_CMD} "$@"
7556                                fi
7557                                ;;
7558                        esac
7559                    }
7560
7561                    # Ensure that login shells get the correct path if the user updated the PATH using ENV.
7562                    rm -f /etc/profile.d/00-restore-env.sh
7563                    echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
7564                    chmod +x /etc/profile.d/00-restore-env.sh
7565
7566                    # Some distributions do not install awk by default (e.g. Mariner)
7567                    if ! type awk >/dev/null 2>&1; then
7568                        check_packages awk
7569                    fi
7570
7571                    # Determine the appropriate non-root user
7572                    if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
7573                        USERNAME=""
7574                        POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
7575                        for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
7576                            if id -u ${CURRENT_USER} > /dev/null 2>&1; then
7577                                USERNAME=${CURRENT_USER}
7578                                break
7579                            fi
7580                        done
7581                        if [ "${USERNAME}" = "" ]; then
7582                            USERNAME=root
7583                        fi
7584                    elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
7585                        USERNAME=root
7586                    fi
7587
7588                    export DEBIAN_FRONTEND=noninteractive
7589
7590                    check_packages ca-certificates gnupg2 tar gcc make pkg-config
7591
7592                    if [ $ADJUSTED_ID = "debian" ]; then
7593                        check_packages g++ libc6-dev
7594                    else
7595                        check_packages gcc-c++ glibc-devel
7596                    fi
7597                    # Install curl, git, other dependencies if missing
7598                    if ! type curl > /dev/null 2>&1; then
7599                        check_packages curl
7600                    fi
7601                    if ! type git > /dev/null 2>&1; then
7602                        check_packages git
7603                    fi
7604                    # Some systems, e.g. Mariner, still a few more packages
7605                    if ! type as > /dev/null 2>&1; then
7606                        check_packages binutils
7607                    fi
7608                    if ! [ -f /usr/include/linux/errno.h ]; then
7609                        check_packages kernel-headers
7610                    fi
7611                    # Minimal RHEL install may need findutils installed
7612                    if ! [ -f /usr/bin/find ]; then
7613                        check_packages findutils
7614                    fi
7615
7616                    # Get closest match for version number specified
7617                    find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
7618
7619                    architecture="$(uname -m)"
7620                    case $architecture in
7621                        x86_64) architecture="amd64";;
7622                        aarch64 | armv8*) architecture="arm64";;
7623                        aarch32 | armv7* | armvhf*) architecture="armv6l";;
7624                        i?86) architecture="386";;
7625                        *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
7626                    esac
7627
7628                    # Install Go
7629                    umask 0002
7630                    if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
7631                        groupadd -r golang
7632                    fi
7633                    usermod -a -G golang "${USERNAME}"
7634                    mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7635
7636                    if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
7637                        # Use a temporary location for gpg keys to avoid polluting image
7638                        export GNUPGHOME="/tmp/tmp-gnupg"
7639                        mkdir -p ${GNUPGHOME}
7640                        chmod 700 ${GNUPGHOME}
7641                        curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
7642                        gpg -q --import /tmp/tmp-gnupg/golang_key
7643                        echo "Downloading Go ${TARGET_GO_VERSION}..."
7644                        set +e
7645                        curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
7646                        exit_code=$?
7647                        set -e
7648                        if [ "$exit_code" != "0" ]; then
7649                            echo "(!) Download failed."
7650                            # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
7651                            set +e
7652                            major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
7653                            minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
7654                            breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
7655                            # Handle Go's odd version pattern where "0" releases omit the last part
7656                            if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
7657                                ((minor=minor-1))
7658                                TARGET_GO_VERSION="${major}.${minor}"
7659                                # Look for latest version from previous minor release
7660                                find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
7661                            else
7662                                ((breakfix=breakfix-1))
7663                                if [ "${breakfix}" = "0" ]; then
7664                                    TARGET_GO_VERSION="${major}.${minor}"
7665                                else
7666                                    TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
7667                                fi
7668                            fi
7669                            set -e
7670                            echo "Trying ${TARGET_GO_VERSION}..."
7671                            curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
7672                        fi
7673                        curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
7674                        gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
7675                        echo "Extracting Go ${TARGET_GO_VERSION}..."
7676                        tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
7677                        rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
7678                    else
7679                        echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
7680                    fi
7681
7682                    # Install Go tools that are isImportant && !replacedByGopls based on
7683                    # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
7684                    GO_TOOLS="\
7685                        golang.org/x/tools/gopls@latest \
7686                        honnef.co/go/tools/cmd/staticcheck@latest \
7687                        golang.org/x/lint/golint@latest \
7688                        github.com/mgechev/revive@latest \
7689                        github.com/go-delve/delve/cmd/dlv@latest \
7690                        github.com/fatih/gomodifytags@latest \
7691                        github.com/haya14busa/goplay/cmd/goplay@latest \
7692                        github.com/cweill/gotests/gotests@latest \
7693                        github.com/josharian/impl@latest"
7694
7695                    if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
7696                        echo "Installing common Go tools..."
7697                        export PATH=${TARGET_GOROOT}/bin:${PATH}
7698                        export GOPATH=/tmp/gotools
7699                        export GOCACHE="${GOPATH}/cache"
7700
7701                        mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
7702                        cd "${GOPATH}"
7703
7704                        # Use go get for versions of go under 1.16
7705                        go_install_command=install
7706                        if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
7707                            export GO111MODULE=on
7708                            go_install_command=get
7709                            echo "Go version < 1.16, using go get."
7710                        fi
7711
7712                        (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
7713
7714                        # Move Go tools into path
7715                        if [ -d "${GOPATH}/bin" ]; then
7716                            mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
7717                        fi
7718
7719                        # Install golangci-lint from precompiled binaries
7720                        if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
7721                            echo "Installing golangci-lint latest..."
7722                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
7723                                sh -s -- -b "${TARGET_GOPATH}/bin"
7724                        else
7725                            echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
7726                            curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
7727                                sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
7728                        fi
7729
7730                        # Remove Go tools temp directory
7731                        rm -rf "${GOPATH}"
7732                    fi
7733
7734
7735                    chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7736                    chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7737                    find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
7738                    find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
7739
7740                    # Clean up
7741                    clean_up
7742
7743                    echo "Done!"
7744                        "#),
7745                ])
7746                .await;
7747                return Ok(http::Response::builder()
7748                    .status(200)
7749                    .body(AsyncBody::from(response))
7750                    .unwrap());
7751            }
            // Mock endpoint: OCI manifest for the devcontainers "aws-cli" feature, tag "1".
            // Returns a manifest JSON shaped like a real ghcr.io response: one tar layer
            // whose digest is what the follow-up blob request (handled below) matches on,
            // plus the "dev.containers.metadata" annotation carrying the serialized
            // devcontainer-feature.json metadata.
            // NOTE(review): the body is fixture data — do not reformat or re-indent the
            // raw string; the layer digest here must stay in sync with the blob path
            // checked by the next handler. (Presumably get_oci_manifest parses this —
            // confirm against the caller.)
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                    {
                        "schemaVersion": 2,
                        "mediaType": "application/vnd.oci.image.manifest.v1+json",
                        "config": {
                            "mediaType": "application/vnd.devcontainers",
                            "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                            "size": 2
                        },
                        "layers": [
                            {
                                "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                                "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                                "size": 19968,
                                "annotations": {
                                    "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                                }
                            }
                        ],
                        "annotations": {
                            "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                            "com.github.package.type": "devcontainer_feature"
                        }
                    }"#;
                // Serve the manifest verbatim with HTTP 200.
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
7782            if parts.uri.path()
7783                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
7784            {
7785                let response = build_tarball(vec![
7786                    (
7787                        "./devcontainer-feature.json",
7788                        r#"
7789{
7790    "id": "aws-cli",
7791    "version": "1.1.3",
7792    "name": "AWS CLI",
7793    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
7794    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
7795    "options": {
7796        "version": {
7797            "type": "string",
7798            "proposals": [
7799                "latest"
7800            ],
7801            "default": "latest",
7802            "description": "Select or enter an AWS CLI version."
7803        },
7804        "verbose": {
7805            "type": "boolean",
7806            "default": true,
7807            "description": "Suppress verbose output."
7808        }
7809    },
7810    "customizations": {
7811        "vscode": {
7812            "extensions": [
7813                "AmazonWebServices.aws-toolkit-vscode"
7814            ],
7815            "settings": {
7816                "github.copilot.chat.codeGeneration.instructions": [
7817                    {
7818                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
7819                    }
7820                ]
7821            }
7822        }
7823    },
7824    "installsAfter": [
7825        "ghcr.io/devcontainers/features/common-utils"
7826    ]
7827}
7828                    "#,
7829                    ),
7830                    (
7831                        "./install.sh",
7832                        r#"#!/usr/bin/env bash
7833                    #-------------------------------------------------------------------------------------------------------------
7834                    # Copyright (c) Microsoft Corporation. All rights reserved.
7835                    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7836                    #-------------------------------------------------------------------------------------------------------------
7837                    #
7838                    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
7839                    # Maintainer: The VS Code and Codespaces Teams
7840
7841                    set -e
7842
7843                    # Clean up
7844                    rm -rf /var/lib/apt/lists/*
7845
7846                    VERSION=${VERSION:-"latest"}
7847                    VERBOSE=${VERBOSE:-"true"}
7848
7849                    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7850                    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7851
7852                    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7853                    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7854                    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7855                    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7856                    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7857                    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7858                    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7859                    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7860                    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7861                    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7862                    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7863                    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7864                    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7865                    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7866                    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7867                    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7868                    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7869                    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7870                    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7871                    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7872                    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7873                    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7874                    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7875                    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7876                    YLZATHZKTJyiqA==
7877                    =vYOk
7878                    -----END PGP PUBLIC KEY BLOCK-----"
7879
7880                    if [ "$(id -u)" -ne 0 ]; then
7881                        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7882                        exit 1
7883                    fi
7884
7885                    apt_get_update()
7886                    {
7887                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7888                            echo "Running apt-get update..."
7889                            apt-get update -y
7890                        fi
7891                    }
7892
7893                    # Checks if packages are installed and installs them if not
7894                    check_packages() {
7895                        if ! dpkg -s "$@" > /dev/null 2>&1; then
7896                            apt_get_update
7897                            apt-get -y install --no-install-recommends "$@"
7898                        fi
7899                    }
7900
7901                    export DEBIAN_FRONTEND=noninteractive
7902
7903                    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7904
7905                    verify_aws_cli_gpg_signature() {
7906                        local filePath=$1
7907                        local sigFilePath=$2
7908                        local awsGpgKeyring=aws-cli-public-key.gpg
7909
7910                        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7911                        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7912                        local status=$?
7913
7914                        rm "./${awsGpgKeyring}"
7915
7916                        return ${status}
7917                    }
7918
7919                    install() {
7920                        local scriptZipFile=awscli.zip
7921                        local scriptSigFile=awscli.sig
7922
7923                        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7924                        if [ "${VERSION}" != "latest" ]; then
7925                            local versionStr=-${VERSION}
7926                        fi
7927                        architecture=$(dpkg --print-architecture)
7928                        case "${architecture}" in
7929                            amd64) architectureStr=x86_64 ;;
7930                            arm64) architectureStr=aarch64 ;;
7931                            *)
7932                                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7933                                exit 1
7934                        esac
7935                        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7936                        curl "${scriptUrl}" -o "${scriptZipFile}"
7937                        curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7938
7939                        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7940                        if (( $? > 0 )); then
7941                            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7942                            exit 1
7943                        fi
7944
7945                        if [ "${VERBOSE}" = "false" ]; then
7946                            unzip -q "${scriptZipFile}"
7947                        else
7948                            unzip "${scriptZipFile}"
7949                        fi
7950
7951                        ./aws/install
7952
7953                        # kubectl bash completion
7954                        mkdir -p /etc/bash_completion.d
7955                        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7956
7957                        # kubectl zsh completion
7958                        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7959                            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7960                            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7961                            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7962                        fi
7963
7964                        rm -rf ./aws
7965                    }
7966
7967                    echo "(*) Installing AWS CLI..."
7968
7969                    install
7970
7971                    # Clean up
7972                    rm -rf /var/lib/apt/lists/*
7973
7974                    echo "Done!""#,
7975                    ),
7976                    ("./scripts/", r#""#),
7977                    (
7978                        "./scripts/fetch-latest-completer-scripts.sh",
7979                        r#"
7980                        #!/bin/bash
7981                        #-------------------------------------------------------------------------------------------------------------
7982                        # Copyright (c) Microsoft Corporation. All rights reserved.
7983                        # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7984                        #-------------------------------------------------------------------------------------------------------------
7985                        #
7986                        # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7987                        # Maintainer: The Dev Container spec maintainers
7988                        #
7989                        # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7990                        #
7991                        COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7992                        BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7993                        ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7994
7995                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7996                        chmod +x "$BASH_COMPLETER_SCRIPT"
7997
7998                        wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7999                        chmod +x "$ZSH_COMPLETER_SCRIPT"
8000                        "#,
8001                    ),
8002                    ("./scripts/vendor/", r#""#),
8003                    (
8004                        "./scripts/vendor/aws_bash_completer",
8005                        r#"
8006                        # Typically that would be added under one of the following paths:
8007                        # - /etc/bash_completion.d
8008                        # - /usr/local/etc/bash_completion.d
8009                        # - /usr/share/bash-completion/completions
8010
8011                        complete -C aws_completer aws
8012                        "#,
8013                    ),
8014                    (
8015                        "./scripts/vendor/aws_zsh_completer.sh",
8016                        r#"
8017                        # Source this file to activate auto completion for zsh using the bash
8018                        # compatibility helper.  Make sure to run `compinit` before, which should be
8019                        # given usually.
8020                        #
8021                        # % source /path/to/zsh_complete.sh
8022                        #
8023                        # Typically that would be called somewhere in your .zshrc.
8024                        #
8025                        # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
8026                        # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
8027                        #
8028                        # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
8029                        #
8030                        # zsh releases prior to that version do not export the required env variables!
8031
8032                        autoload -Uz bashcompinit
8033                        bashcompinit -i
8034
8035                        _bash_complete() {
8036                          local ret=1
8037                          local -a suf matches
8038                          local -x COMP_POINT COMP_CWORD
8039                          local -a COMP_WORDS COMPREPLY BASH_VERSINFO
8040                          local -x COMP_LINE="$words"
8041                          local -A savejobstates savejobtexts
8042
8043                          (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
8044                          (( COMP_CWORD = CURRENT - 1))
8045                          COMP_WORDS=( $words )
8046                          BASH_VERSINFO=( 2 05b 0 1 release )
8047
8048                          savejobstates=( ${(kv)jobstates} )
8049                          savejobtexts=( ${(kv)jobtexts} )
8050
8051                          [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
8052
8053                          matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
8054
8055                          if [[ -n $matches ]]; then
8056                            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
8057                              compset -P '*/' && matches=( ${matches##*/} )
8058                              compset -S '/*' && matches=( ${matches%%/*} )
8059                              compadd -Q -f "${suf[@]}" -a matches && ret=0
8060                            else
8061                              compadd -Q "${suf[@]}" -a matches && ret=0
8062                            fi
8063                          fi
8064
8065                          if (( ret )); then
8066                            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
8067                              _default "${suf[@]}" && ret=0
8068                            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
8069                              _directories "${suf[@]}" && ret=0
8070                            fi
8071                          fi
8072
8073                          return ret
8074                        }
8075
8076                        complete -C aws_completer aws
8077                        "#,
8078                    ),
8079                ]).await;
8080
8081                return Ok(http::Response::builder()
8082                    .status(200)
8083                    .body(AsyncBody::from(response))
8084                    .unwrap());
8085            }
8086
8087            Ok(http::Response::builder()
8088                .status(404)
8089                .body(http_client::AsyncBody::default())
8090                .unwrap())
8091        })
8092    }
8093}