1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use fs::Fs;
10use http_client::HttpClient;
11use util::{ResultExt, command::Command};
12
13use crate::{
14 DevContainerConfig, DevContainerContext,
15 command_json::{CommandRunner, DefaultCommandRunner},
16 devcontainer_api::{DevContainerError, DevContainerUp},
17 devcontainer_json::{
18 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
19 deserialize_devcontainer_json,
20 },
21 docker::{
22 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
23 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
24 get_remote_dir_from_config,
25 },
26 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
27 get_oci_token,
28 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
29 safe_id_lower,
30};
31
/// Tracks how far the devcontainer.json has been processed: freshly
/// deserialized from disk, or re-deserialized after variable expansion.
enum ConfigStatus {
    // Parsed straight from disk; `${...}` variables are still literal text.
    Deserialized(DevContainer),
    // Re-parsed after non-remote variable substitution (see
    // `parse_nonremote_vars`).
    VariableParsed(DevContainer),
}
36
/// The docker-compose files in effect for a dev container together with the
/// merged configuration they produce.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Compose file paths: the user's files first, then any generated override
    // files appended later in the build pipeline.
    files: Vec<PathBuf>,
    // The merged compose configuration reported by the docker client.
    config: DockerComposeConfig,
}
42
/// Everything needed to prepare, build, and run one dev container: parsed
/// config, clients, local paths, and resources accumulated while building.
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // Raw devcontainer.json text, kept so variables can be expanded against
    // the original source (see `parse_nonremote_vars`).
    raw_config: String,
    // Parse/expansion state of `raw_config`.
    config: ConfigStatus,
    // Host environment, used for `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer.json file.
    config_directory: PathBuf,
    // File name component of the devcontainer.json path.
    file_name: String,
    // Inspect result for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths and image tag for the features build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in resolved install order.
    features: Vec<FeatureManifest>,
}
// Conventional parent directory for project workspaces inside the container
// (projects end up under `/workspaces/<name>` — presumably used where no
// explicit workspace folder is configured; confirm at call sites).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
59impl DevContainerManifest {
    /// Reads and deserializes the devcontainer.json referenced by
    /// `local_config` (relative to `local_project_path`) and assembles a
    /// manifest in the `Deserialized` state — no variable expansion has
    /// happened yet.
    ///
    /// Errors with `DevContainerParseFailed` when the file cannot be read,
    /// parsed, or has no valid UTF-8 file name, and `NotInValidProject` when
    /// the config path has no parent directory.
    async fn new(
        context: &DevContainerContext,
        environment: HashMap<String, String>,
        docker_client: Arc<dyn DockerClient>,
        command_runner: Arc<dyn CommandRunner>,
        local_config: DevContainerConfig,
        local_project_path: &Path,
    ) -> Result<Self, DevContainerError> {
        let config_path = local_project_path.join(local_config.config_path.clone());
        log::debug!("parsing devcontainer json found in {:?}", &config_path);
        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
            log::error!("Unable to read devcontainer contents: {e}");
            DevContainerError::DevContainerParseFailed
        })?;

        // Parse eagerly so obviously-broken config fails here, before any
        // docker work is attempted.
        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;

        let devcontainer_directory = config_path.parent().ok_or_else(|| {
            log::error!("Dev container file should be in a directory");
            DevContainerError::NotInValidProject
        })?;
        let file_name = config_path
            .file_name()
            .and_then(|f| f.to_str())
            .ok_or_else(|| {
                log::error!("Dev container file has no file name, or is invalid unicode");
                DevContainerError::DevContainerParseFailed
            })?;

        Ok(Self {
            fs: context.fs.clone(),
            http_client: context.http_client.clone(),
            docker_client,
            command_runner,
            // Keep the raw text: variable expansion re-parses it later.
            raw_config: devcontainer_contents,
            config: ConfigStatus::Deserialized(devcontainer),
            local_project_directory: local_project_path.to_path_buf(),
            local_environment: environment,
            config_directory: devcontainer_directory.to_path_buf(),
            file_name: file_name.to_string(),
            root_image: None,
            features_build_info: None,
            features: Vec::new(),
        })
    }
105
106 fn devcontainer_id(&self) -> String {
107 let mut labels = self.identifying_labels();
108 labels.sort_by_key(|(key, _)| *key);
109
110 let mut hasher = DefaultHasher::new();
111 for (key, value) in &labels {
112 key.hash(&mut hasher);
113 value.hash(&mut hasher);
114 }
115
116 format!("{:016x}", hasher.finish())
117 }
118
119 fn identifying_labels(&self) -> Vec<(&str, String)> {
120 let labels = vec![
121 (
122 "devcontainer.local_folder",
123 (self.local_project_directory.display()).to_string(),
124 ),
125 (
126 "devcontainer.config_file",
127 (self.config_file().display()).to_string(),
128 ),
129 ];
130 labels
131 }
132
    /// Expands the dev container variables that can be resolved without a
    /// running container — `${devcontainerId}`, the workspace-folder family,
    /// and `${localEnv:KEY}` — inside `content`.
    ///
    /// Replacement order is significant: `${containerWorkspaceFolderBasename}`
    /// and `${localWorkspaceFolderBasename}` must be substituted before
    /// `${containerWorkspaceFolder}` / `${localWorkspaceFolder}`, because the
    /// shorter names are prefixes of the longer ones. Backslashes in
    /// substituted paths and env values are normalized to forward slashes.
    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
        let mut replaced_content = content
            .replace("${devcontainerId}", &self.devcontainer_id())
            .replace(
                "${containerWorkspaceFolderBasename}",
                &self.remote_workspace_base_name().unwrap_or_default(),
            )
            .replace(
                "${localWorkspaceFolderBasename}",
                &self.local_workspace_base_name()?,
            )
            .replace(
                "${containerWorkspaceFolder}",
                &self
                    .remote_workspace_folder()
                    .map(|path| path.display().to_string())
                    .unwrap_or_default()
                    .replace('\\', "/"),
            )
            .replace(
                "${localWorkspaceFolder}",
                &self.local_workspace_folder().replace('\\', "/"),
            );
        // Substitute every host env var the caller provided; unknown
        // `${localEnv:...}` references are left untouched.
        for (k, v) in &self.local_environment {
            let find = format!("${{localEnv:{k}}}");
            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
        }

        Ok(replaced_content)
    }
163
164 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
165 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
166 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
167
168 self.config = ConfigStatus::VariableParsed(parsed_config);
169
170 Ok(())
171 }
172
    /// Merges the configured `remote_env` on top of the container's own
    /// environment, resolving `${containerEnv:KEY}` references.
    ///
    /// The substitution works by serializing `remote_env` to a JSON string,
    /// doing plain string replacement of every `${containerEnv:KEY}` against
    /// `container_env`, and deserializing the result — one pass handles every
    /// value.
    /// NOTE(review): values spliced in raw could break JSON quoting if a
    /// container env value contains characters needing JSON escaping — confirm
    /// this is acceptable for expected inputs.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // Configured remote_env wins over the container's own values.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
205
206 fn config_file(&self) -> PathBuf {
207 self.config_directory.join(&self.file_name)
208 }
209
210 fn dev_container(&self) -> &DevContainer {
211 match &self.config {
212 ConfigStatus::Deserialized(dev_container) => dev_container,
213 ConfigStatus::VariableParsed(dev_container) => dev_container,
214 }
215 }
216
217 async fn dockerfile_location(&self) -> Option<PathBuf> {
218 let dev_container = self.dev_container();
219 match dev_container.build_type() {
220 DevContainerBuildType::Image => None,
221 DevContainerBuildType::Dockerfile => dev_container
222 .build
223 .as_ref()
224 .map(|build| self.config_directory.join(&build.dockerfile)),
225 DevContainerBuildType::DockerCompose => {
226 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
227 return None;
228 };
229 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
230 else {
231 return None;
232 };
233 main_service
234 .build
235 .and_then(|b| b.dockerfile)
236 .map(|dockerfile| self.config_directory.join(dockerfile))
237 }
238 DevContainerBuildType::None => None,
239 }
240 }
241
242 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
243 let mut hasher = DefaultHasher::new();
244 let prefix = match &self.dev_container().name {
245 Some(name) => &safe_id_lower(name),
246 None => "zed-dc",
247 };
248 let prefix = prefix.get(..6).unwrap_or(prefix);
249
250 dockerfile_build_path.hash(&mut hasher);
251
252 let hash = hasher.finish();
253 format!("{}-{:x}-features", prefix, hash)
254 }
255
    /// Gets the base image from the devcontainer with the following precedence:
    /// - The devcontainer image if an image is specified
    /// - The image sourced in the Dockerfile if a Dockerfile is specified
    /// - The image sourced in the docker-compose main service's dockerfile, if one is specified
    /// - The image of the docker-compose main service, if one is specified
    ///
    /// If no such image is available, return an error
    async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
        // 1. Explicit image wins outright.
        if let Some(image) = &self.dev_container().image {
            return Ok(image.to_string());
        }
        // 2. A Dockerfile build: parse the base image out of the Dockerfile.
        if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
            let dockerfile_contents = self
                .fs
                .load(&self.config_directory.join(dockerfile))
                .await
                .map_err(|e| {
                    log::error!("Error reading dockerfile: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;
            return image_from_dockerfile(self, dockerfile_contents);
        }
        // 3. Docker compose: prefer the primary service's dockerfile, then its
        //    declared image.
        if self.dev_container().docker_compose_file.is_some() {
            let docker_compose_manifest = self.docker_compose_manifest().await?;
            let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;

            if let Some(dockerfile) = main_service
                .build
                .as_ref()
                .and_then(|b| b.dockerfile.as_ref())
            {
                let dockerfile_contents = self
                    .fs
                    .load(&self.config_directory.join(dockerfile))
                    .await
                    .map_err(|e| {
                        log::error!("Error reading dockerfile: {e}");
                        DevContainerError::DevContainerParseFailed
                    })?;
                return image_from_dockerfile(self, dockerfile_contents);
            }
            if let Some(image) = &main_service.image {
                return Ok(image.to_string());
            }

            log::error!("No valid base image found in docker-compose configuration");
            return Err(DevContainerError::DevContainerParseFailed);
        }
        log::error!("No valid base image found in dev container configuration");
        Err(DevContainerError::DevContainerParseFailed)
    }
306
    /// Downloads every configured feature from its OCI registry and writes the
    /// build resources (feature content, builtin env, Dockerfile.extended)
    /// into a temp directory, recording the results in `self.root_image`,
    /// `self.features_build_info`, and `self.features`.
    ///
    /// Requires the config to already be in the `VariableParsed` state.
    /// Returns early with `Ok(())` when the container is image-based with no
    /// features (nothing to download).
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image first so user/uid data is
        // available when generating the builtin env below.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        if dev_container.build_type() == DevContainerBuildType::Image
            && !dev_container.has_features()
        {
            log::debug!("No resources to download. Proceeding with just the image");
            return Ok(());
        }

        // --- Phase 1: Lay out the temp build directories ---
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps concurrent/successive builds from
        // colliding; falls back to 0 if the clock is before the epoch.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // NOTE(review): `&HashMap::new()` borrows a temporary from a match
        // arm — confirm this compiles on the project's toolchain (match arms
        // are generally not lifetime-extending positions).
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Builtin env consumed by feature install scripts.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // --- Phase 2: Download each feature in resolved install order ---
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A bare `false` disables the feature entirely.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Index suffix keeps directories unique even if the same feature
            // id appears twice.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Token -> manifest -> first-layer tarball download.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature tarball must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also gets non-remote variable expansion.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the per-feature env file, then a wrapper script that
            // sources it before running the feature's installer.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        // Non-compose builds always take the buildkit path; compose builds
        // only when the client supports it.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Best effort: a missing/unreadable base Dockerfile just means no
        // user stage is embedded.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
549
    /// Renders Dockerfile.extended: the (optionally aliased) user Dockerfile,
    /// a stage that normalizes the builtin feature env, the target stage that
    /// copies feature content in, one layer per feature, and a final USER arg.
    ///
    /// `dockerfile_content` is the user's base Dockerfile, when one exists.
    /// `use_buildkit` selects between a buildkit additional-context copy and a
    /// pre-built `dev_container_feature_content_temp` image as the content
    /// source.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // Defaults to true except on Windows, where uid remapping is skipped.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per downloaded feature, concatenated.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's passwd entry (home dir is
        // extracted from field 6 below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the user's Dockerfile has a stage alias we can reference as
        // the base image; inject one if absent.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without buildkit the feature content comes from a pre-built image
        // instead of an additional build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): the ENV line is appended with no preceding
            // newline — if `generate_dockerfile_env` output (or the sed block
            // above, when features are empty) does not end with '\n', the
            // first ENV would fuse onto the previous line. Confirm.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
651
652 fn build_merged_resources(
653 &self,
654 base_image: DockerInspect,
655 ) -> Result<DockerBuildResources, DevContainerError> {
656 let dev_container = match &self.config {
657 ConfigStatus::Deserialized(_) => {
658 log::error!(
659 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
660 );
661 return Err(DevContainerError::DevContainerParseFailed);
662 }
663 ConfigStatus::VariableParsed(dev_container) => dev_container,
664 };
665 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
666
667 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
668
669 mounts.append(&mut feature_mounts);
670
671 let privileged = dev_container.privileged.unwrap_or(false)
672 || self.features.iter().any(|f| f.privileged());
673
674 let mut entrypoint_script_lines = vec![
675 "echo Container started".to_string(),
676 "trap \"exit 0\" 15".to_string(),
677 ];
678
679 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
680 entrypoint_script_lines.push(entrypoint.clone());
681 }
682 entrypoint_script_lines.append(&mut vec![
683 "exec \"$@\"".to_string(),
684 "while sleep 1 & wait $!; do :; done".to_string(),
685 ]);
686
687 Ok(DockerBuildResources {
688 image: base_image,
689 additional_mounts: mounts,
690 privileged,
691 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
692 })
693 }
694
695 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
696 if let ConfigStatus::Deserialized(_) = &self.config {
697 log::error!(
698 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
699 );
700 return Err(DevContainerError::DevContainerParseFailed);
701 }
702 let dev_container = self.dev_container();
703 match dev_container.build_type() {
704 DevContainerBuildType::Image | DevContainerBuildType::Dockerfile => {
705 let built_docker_image = self.build_docker_image().await?;
706 let built_docker_image = self
707 .update_remote_user_uid(built_docker_image, None)
708 .await?;
709
710 let resources = self.build_merged_resources(built_docker_image)?;
711 Ok(DevContainerBuildResources::Docker(resources))
712 }
713 DevContainerBuildType::DockerCompose => {
714 log::debug!("Using docker compose. Building extended compose files");
715 let docker_compose_resources = self.build_and_extend_compose_files().await?;
716
717 return Ok(DevContainerBuildResources::DockerCompose(
718 docker_compose_resources,
719 ));
720 }
721 DevContainerBuildType::None => {
722 return Err(DevContainerError::DevContainerParseFailed);
723 }
724 }
725 }
726
727 async fn run_dev_container(
728 &self,
729 build_resources: DevContainerBuildResources,
730 ) -> Result<DevContainerUp, DevContainerError> {
731 let ConfigStatus::VariableParsed(_) = &self.config else {
732 log::error!(
733 "Variables have not been parsed; cannot proceed with running the dev container"
734 );
735 return Err(DevContainerError::DevContainerParseFailed);
736 };
737 let running_container = match build_resources {
738 DevContainerBuildResources::DockerCompose(resources) => {
739 self.run_docker_compose(resources).await?
740 }
741 DevContainerBuildResources::Docker(resources) => {
742 self.run_docker_image(resources).await?
743 }
744 };
745
746 let remote_user = get_remote_user_from_config(&running_container, self)?;
747 let remote_workspace_folder = get_remote_dir_from_config(
748 &running_container,
749 (&self.local_project_directory.display()).to_string(),
750 )?;
751
752 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
753
754 Ok(DevContainerUp {
755 container_id: running_container.id,
756 remote_user,
757 remote_workspace_folder,
758 extension_ids: self.extension_ids(),
759 remote_env,
760 })
761 }
762
763 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
764 let dev_container = match &self.config {
765 ConfigStatus::Deserialized(_) => {
766 log::error!(
767 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
768 );
769 return Err(DevContainerError::DevContainerParseFailed);
770 }
771 ConfigStatus::VariableParsed(dev_container) => dev_container,
772 };
773 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
774 return Err(DevContainerError::DevContainerParseFailed);
775 };
776 let docker_compose_full_paths = docker_compose_files
777 .iter()
778 .map(|relative| self.config_directory.join(relative))
779 .collect::<Vec<PathBuf>>();
780
781 let Some(config) = self
782 .docker_client
783 .get_docker_compose_config(&docker_compose_full_paths)
784 .await?
785 else {
786 log::error!("Output could not deserialize into DockerComposeConfig");
787 return Err(DevContainerError::DevContainerParseFailed);
788 };
789 Ok(DockerComposeResources {
790 files: docker_compose_full_paths,
791 config,
792 })
793 }
794
795 async fn build_and_extend_compose_files(
796 &self,
797 ) -> Result<DockerComposeResources, DevContainerError> {
798 let dev_container = match &self.config {
799 ConfigStatus::Deserialized(_) => {
800 log::error!(
801 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
802 );
803 return Err(DevContainerError::DevContainerParseFailed);
804 }
805 ConfigStatus::VariableParsed(dev_container) => dev_container,
806 };
807
808 let Some(features_build_info) = &self.features_build_info else {
809 log::error!(
810 "Cannot build and extend compose files: features build info is not yet constructed"
811 );
812 return Err(DevContainerError::DevContainerParseFailed);
813 };
814 let mut docker_compose_resources = self.docker_compose_manifest().await?;
815 let supports_buildkit = self.docker_client.supports_compose_buildkit();
816
817 let (main_service_name, main_service) =
818 find_primary_service(&docker_compose_resources, self)?;
819 let built_service_image = if main_service
820 .build
821 .as_ref()
822 .map(|b| b.dockerfile.as_ref())
823 .is_some()
824 {
825 if !supports_buildkit {
826 self.build_feature_content_image().await?;
827 }
828
829 let dockerfile_path = &features_build_info.dockerfile_path;
830
831 let build_args = if !supports_buildkit {
832 HashMap::from([
833 (
834 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
835 "dev_container_auto_added_stage_label".to_string(),
836 ),
837 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
838 ])
839 } else {
840 HashMap::from([
841 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
842 (
843 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
844 "dev_container_auto_added_stage_label".to_string(),
845 ),
846 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
847 ])
848 };
849
850 let additional_contexts = if !supports_buildkit {
851 None
852 } else {
853 Some(HashMap::from([(
854 "dev_containers_feature_content_source".to_string(),
855 features_build_info
856 .features_content_dir
857 .display()
858 .to_string(),
859 )]))
860 };
861
862 let build_override = DockerComposeConfig {
863 name: None,
864 services: HashMap::from([(
865 main_service_name.clone(),
866 DockerComposeService {
867 image: Some(features_build_info.image_tag.clone()),
868 entrypoint: None,
869 cap_add: None,
870 security_opt: None,
871 labels: None,
872 build: Some(DockerComposeServiceBuild {
873 context: Some(
874 features_build_info.empty_context_dir.display().to_string(),
875 ),
876 dockerfile: Some(dockerfile_path.display().to_string()),
877 args: Some(build_args),
878 additional_contexts,
879 }),
880 volumes: Vec::new(),
881 ..Default::default()
882 },
883 )]),
884 volumes: HashMap::new(),
885 };
886
887 let temp_base = std::env::temp_dir().join("devcontainer-zed");
888 let config_location = temp_base.join("docker_compose_build.json");
889
890 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
891 log::error!("Error serializing docker compose runtime override: {e}");
892 DevContainerError::DevContainerParseFailed
893 })?;
894
895 self.fs
896 .write(&config_location, config_json.as_bytes())
897 .await
898 .map_err(|e| {
899 log::error!("Error writing the runtime override file: {e}");
900 DevContainerError::FilesystemError
901 })?;
902
903 docker_compose_resources.files.push(config_location);
904
905 self.docker_client
906 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
907 .await?;
908 self.docker_client
909 .inspect(&features_build_info.image_tag)
910 .await?
911 } else if let Some(image) = &main_service.image {
912 if dev_container
913 .features
914 .as_ref()
915 .is_none_or(|features| features.is_empty())
916 {
917 self.docker_client.inspect(image).await?
918 } else {
919 if !supports_buildkit {
920 self.build_feature_content_image().await?;
921 }
922
923 let dockerfile_path = &features_build_info.dockerfile_path;
924
925 let build_args = if !supports_buildkit {
926 HashMap::from([
927 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
928 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
929 ])
930 } else {
931 HashMap::from([
932 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
933 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
934 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
935 ])
936 };
937
938 let additional_contexts = if !supports_buildkit {
939 None
940 } else {
941 Some(HashMap::from([(
942 "dev_containers_feature_content_source".to_string(),
943 features_build_info
944 .features_content_dir
945 .display()
946 .to_string(),
947 )]))
948 };
949
950 let build_override = DockerComposeConfig {
951 name: None,
952 services: HashMap::from([(
953 main_service_name.clone(),
954 DockerComposeService {
955 image: Some(features_build_info.image_tag.clone()),
956 entrypoint: None,
957 cap_add: None,
958 security_opt: None,
959 labels: None,
960 build: Some(DockerComposeServiceBuild {
961 context: Some(
962 features_build_info.empty_context_dir.display().to_string(),
963 ),
964 dockerfile: Some(dockerfile_path.display().to_string()),
965 args: Some(build_args),
966 additional_contexts,
967 }),
968 volumes: Vec::new(),
969 ..Default::default()
970 },
971 )]),
972 volumes: HashMap::new(),
973 };
974
975 let temp_base = std::env::temp_dir().join("devcontainer-zed");
976 let config_location = temp_base.join("docker_compose_build.json");
977
978 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
979 log::error!("Error serializing docker compose runtime override: {e}");
980 DevContainerError::DevContainerParseFailed
981 })?;
982
983 self.fs
984 .write(&config_location, config_json.as_bytes())
985 .await
986 .map_err(|e| {
987 log::error!("Error writing the runtime override file: {e}");
988 DevContainerError::FilesystemError
989 })?;
990
991 docker_compose_resources.files.push(config_location);
992
993 self.docker_client
994 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
995 .await?;
996
997 self.docker_client
998 .inspect(&features_build_info.image_tag)
999 .await?
1000 }
1001 } else {
1002 log::error!("Docker compose must have either image or dockerfile defined");
1003 return Err(DevContainerError::DevContainerParseFailed);
1004 };
1005
1006 let built_service_image = self
1007 .update_remote_user_uid(built_service_image, Some(&features_build_info.image_tag))
1008 .await?;
1009
1010 let resources = self.build_merged_resources(built_service_image)?;
1011
1012 let network_mode = main_service.network_mode.as_ref();
1013 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1014 let runtime_override_file = self
1015 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1016 .await?;
1017
1018 docker_compose_resources.files.push(runtime_override_file);
1019
1020 Ok(docker_compose_resources)
1021 }
1022
1023 async fn write_runtime_override_file(
1024 &self,
1025 main_service_name: &str,
1026 network_mode_service: Option<&str>,
1027 resources: DockerBuildResources,
1028 ) -> Result<PathBuf, DevContainerError> {
1029 let config =
1030 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1031 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1032 let config_location = temp_base.join("docker_compose_runtime.json");
1033
1034 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1035 log::error!("Error serializing docker compose runtime override: {e}");
1036 DevContainerError::DevContainerParseFailed
1037 })?;
1038
1039 self.fs
1040 .write(&config_location, config_json.as_bytes())
1041 .await
1042 .map_err(|e| {
1043 log::error!("Error writing the runtime override file: {e}");
1044 DevContainerError::FilesystemError
1045 })?;
1046
1047 Ok(config_location)
1048 }
1049
1050 fn build_runtime_override(
1051 &self,
1052 main_service_name: &str,
1053 network_mode_service: Option<&str>,
1054 resources: DockerBuildResources,
1055 ) -> Result<DockerComposeConfig, DevContainerError> {
1056 let mut runtime_labels = HashMap::new();
1057
1058 if let Some(metadata) = &resources.image.config.labels.metadata {
1059 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1060 log::error!("Error serializing docker image metadata: {e}");
1061 DevContainerError::ContainerNotValid(resources.image.id.clone())
1062 })?;
1063
1064 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1065 }
1066
1067 for (k, v) in self.identifying_labels() {
1068 runtime_labels.insert(k.to_string(), v.to_string());
1069 }
1070
1071 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1072 .additional_mounts
1073 .iter()
1074 .filter_map(|mount| {
1075 if let Some(mount_type) = &mount.mount_type
1076 && mount_type.to_lowercase() == "volume"
1077 {
1078 Some((
1079 mount.source.clone(),
1080 DockerComposeVolume {
1081 name: mount.source.clone(),
1082 },
1083 ))
1084 } else {
1085 None
1086 }
1087 })
1088 .collect();
1089
1090 let volumes: Vec<MountDefinition> = resources
1091 .additional_mounts
1092 .iter()
1093 .map(|v| MountDefinition {
1094 source: v.source.clone(),
1095 target: v.target.clone(),
1096 mount_type: v.mount_type.clone(),
1097 })
1098 .collect();
1099
1100 let mut main_service = DockerComposeService {
1101 entrypoint: Some(vec![
1102 "/bin/sh".to_string(),
1103 "-c".to_string(),
1104 resources.entrypoint_script,
1105 "-".to_string(),
1106 ]),
1107 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1108 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1109 labels: Some(runtime_labels),
1110 volumes,
1111 privileged: Some(resources.privileged),
1112 ..Default::default()
1113 };
1114 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1115 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1116 if let Some(forward_ports) = &self.dev_container().forward_ports {
1117 let main_service_ports: Vec<String> = forward_ports
1118 .iter()
1119 .filter_map(|f| match f {
1120 ForwardPort::Number(port) => Some(port.to_string()),
1121 ForwardPort::String(port) => {
1122 let parts: Vec<&str> = port.split(":").collect();
1123 if parts.len() <= 1 {
1124 Some(port.to_string())
1125 } else if parts.len() == 2 {
1126 if parts[0] == main_service_name {
1127 Some(parts[1].to_string())
1128 } else {
1129 None
1130 }
1131 } else {
1132 None
1133 }
1134 }
1135 })
1136 .collect();
1137 for port in main_service_ports {
1138 // If the main service uses a different service's network bridge, append to that service's ports instead
1139 if let Some(network_service_name) = network_mode_service {
1140 if let Some(service) = service_declarations.get_mut(network_service_name) {
1141 service.ports.push(DockerComposeServicePort {
1142 target: port.clone(),
1143 published: port.clone(),
1144 ..Default::default()
1145 });
1146 } else {
1147 service_declarations.insert(
1148 network_service_name.to_string(),
1149 DockerComposeService {
1150 ports: vec![DockerComposeServicePort {
1151 target: port.clone(),
1152 published: port.clone(),
1153 ..Default::default()
1154 }],
1155 ..Default::default()
1156 },
1157 );
1158 }
1159 } else {
1160 main_service.ports.push(DockerComposeServicePort {
1161 target: port.clone(),
1162 published: port.clone(),
1163 ..Default::default()
1164 });
1165 }
1166 }
1167 let other_service_ports: Vec<(&str, &str)> = forward_ports
1168 .iter()
1169 .filter_map(|f| match f {
1170 ForwardPort::Number(_) => None,
1171 ForwardPort::String(port) => {
1172 let parts: Vec<&str> = port.split(":").collect();
1173 if parts.len() != 2 {
1174 None
1175 } else {
1176 if parts[0] == main_service_name {
1177 None
1178 } else {
1179 Some((parts[0], parts[1]))
1180 }
1181 }
1182 }
1183 })
1184 .collect();
1185 for (service_name, port) in other_service_ports {
1186 if let Some(service) = service_declarations.get_mut(service_name) {
1187 service.ports.push(DockerComposeServicePort {
1188 target: port.to_string(),
1189 published: port.to_string(),
1190 ..Default::default()
1191 });
1192 } else {
1193 service_declarations.insert(
1194 service_name.to_string(),
1195 DockerComposeService {
1196 ports: vec![DockerComposeServicePort {
1197 target: port.to_string(),
1198 published: port.to_string(),
1199 ..Default::default()
1200 }],
1201 ..Default::default()
1202 },
1203 );
1204 }
1205 }
1206 }
1207 if let Some(port) = &self.dev_container().app_port {
1208 if let Some(network_service_name) = network_mode_service {
1209 if let Some(service) = service_declarations.get_mut(network_service_name) {
1210 service.ports.push(DockerComposeServicePort {
1211 target: port.clone(),
1212 published: port.clone(),
1213 ..Default::default()
1214 });
1215 } else {
1216 service_declarations.insert(
1217 network_service_name.to_string(),
1218 DockerComposeService {
1219 ports: vec![DockerComposeServicePort {
1220 target: port.clone(),
1221 published: port.clone(),
1222 ..Default::default()
1223 }],
1224 ..Default::default()
1225 },
1226 );
1227 }
1228 } else {
1229 main_service.ports.push(DockerComposeServicePort {
1230 target: port.clone(),
1231 published: port.clone(),
1232 ..Default::default()
1233 });
1234 }
1235 }
1236
1237 service_declarations.insert(main_service_name.to_string(), main_service);
1238 let new_docker_compose_config = DockerComposeConfig {
1239 name: None,
1240 services: service_declarations,
1241 volumes: config_volumes,
1242 };
1243
1244 Ok(new_docker_compose_config)
1245 }
1246
1247 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1248 let dev_container = match &self.config {
1249 ConfigStatus::Deserialized(_) => {
1250 log::error!(
1251 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1252 );
1253 return Err(DevContainerError::DevContainerParseFailed);
1254 }
1255 ConfigStatus::VariableParsed(dev_container) => dev_container,
1256 };
1257
1258 match dev_container.build_type() {
1259 DevContainerBuildType::Image => {
1260 let Some(image_tag) = &dev_container.image else {
1261 return Err(DevContainerError::DevContainerParseFailed);
1262 };
1263 let base_image = self.docker_client.inspect(image_tag).await?;
1264 if dev_container
1265 .features
1266 .as_ref()
1267 .is_none_or(|features| features.is_empty())
1268 {
1269 log::debug!("No features to add. Using base image");
1270 return Ok(base_image);
1271 }
1272 }
1273 DevContainerBuildType::Dockerfile => {}
1274 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1275 return Err(DevContainerError::DevContainerParseFailed);
1276 }
1277 };
1278
1279 let mut command = self.create_docker_build()?;
1280
1281 let output = self
1282 .command_runner
1283 .run_command(&mut command)
1284 .await
1285 .map_err(|e| {
1286 log::error!("Error building docker image: {e}");
1287 DevContainerError::CommandFailed(command.get_program().display().to_string())
1288 })?;
1289
1290 if !output.status.success() {
1291 let stderr = String::from_utf8_lossy(&output.stderr);
1292 log::error!("docker buildx build failed: {stderr}");
1293 return Err(DevContainerError::CommandFailed(
1294 command.get_program().display().to_string(),
1295 ));
1296 }
1297
1298 // After a successful build, inspect the newly tagged image to get its metadata
1299 let Some(features_build_info) = &self.features_build_info else {
1300 log::error!("Features build info expected, but not created");
1301 return Err(DevContainerError::DevContainerParseFailed);
1302 };
1303 let image = self
1304 .docker_client
1305 .inspect(&features_build_info.image_tag)
1306 .await?;
1307
1308 Ok(image)
1309 }
1310
    /// No-op on Windows hosts: the unix variant's `id`-based UID/GID remapping
    /// is not applicable there, so the inspected image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _override_tag: Option<&str>,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1319 #[cfg(not(target_os = "windows"))]
1320 async fn update_remote_user_uid(
1321 &self,
1322 image: DockerInspect,
1323 override_tag: Option<&str>,
1324 ) -> Result<DockerInspect, DevContainerError> {
1325 let dev_container = self.dev_container();
1326
1327 let Some(features_build_info) = &self.features_build_info else {
1328 return Ok(image);
1329 };
1330
1331 // updateRemoteUserUID defaults to true per the devcontainers spec
1332 if dev_container.update_remote_user_uid == Some(false) {
1333 return Ok(image);
1334 }
1335
1336 let remote_user = get_remote_user_from_config(&image, self)?;
1337 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1338 return Ok(image);
1339 }
1340
1341 let image_user = image
1342 .config
1343 .image_user
1344 .as_deref()
1345 .unwrap_or("root")
1346 .to_string();
1347
1348 let host_uid = Command::new("id")
1349 .arg("-u")
1350 .output()
1351 .await
1352 .map_err(|e| {
1353 log::error!("Failed to get host UID: {e}");
1354 DevContainerError::CommandFailed("id -u".to_string())
1355 })
1356 .and_then(|output| {
1357 String::from_utf8_lossy(&output.stdout)
1358 .trim()
1359 .parse::<u32>()
1360 .map_err(|e| {
1361 log::error!("Failed to parse host UID: {e}");
1362 DevContainerError::CommandFailed("id -u".to_string())
1363 })
1364 })?;
1365
1366 let host_gid = Command::new("id")
1367 .arg("-g")
1368 .output()
1369 .await
1370 .map_err(|e| {
1371 log::error!("Failed to get host GID: {e}");
1372 DevContainerError::CommandFailed("id -g".to_string())
1373 })
1374 .and_then(|output| {
1375 String::from_utf8_lossy(&output.stdout)
1376 .trim()
1377 .parse::<u32>()
1378 .map_err(|e| {
1379 log::error!("Failed to parse host GID: {e}");
1380 DevContainerError::CommandFailed("id -g".to_string())
1381 })
1382 })?;
1383
1384 let dockerfile_content = self.generate_update_uid_dockerfile();
1385
1386 let dockerfile_path = features_build_info
1387 .features_content_dir
1388 .join("updateUID.Dockerfile");
1389 self.fs
1390 .write(&dockerfile_path, dockerfile_content.as_bytes())
1391 .await
1392 .map_err(|e| {
1393 log::error!("Failed to write updateUID Dockerfile: {e}");
1394 DevContainerError::FilesystemError
1395 })?;
1396
1397 let updated_image_tag = override_tag
1398 .map(|t| t.to_string())
1399 .unwrap_or_else(|| format!("{}-uid", features_build_info.image_tag));
1400
1401 let mut command = Command::new(self.docker_client.docker_cli());
1402 command.args(["build"]);
1403 command.args(["-f", &dockerfile_path.display().to_string()]);
1404 command.args(["-t", &updated_image_tag]);
1405 command.args([
1406 "--build-arg",
1407 &format!("BASE_IMAGE={}", features_build_info.image_tag),
1408 ]);
1409 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1410 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1411 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1412 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1413 command.arg(features_build_info.empty_context_dir.display().to_string());
1414
1415 let output = self
1416 .command_runner
1417 .run_command(&mut command)
1418 .await
1419 .map_err(|e| {
1420 log::error!("Error building UID update image: {e}");
1421 DevContainerError::CommandFailed(command.get_program().display().to_string())
1422 })?;
1423
1424 if !output.status.success() {
1425 let stderr = String::from_utf8_lossy(&output.stderr);
1426 log::error!("UID update build failed: {stderr}");
1427 return Err(DevContainerError::CommandFailed(
1428 command.get_program().display().to_string(),
1429 ));
1430 }
1431
1432 self.docker_client.inspect(&updated_image_tag).await
1433 }
1434
1435 #[cfg(not(target_os = "windows"))]
1436 fn generate_update_uid_dockerfile(&self) -> String {
1437 let mut dockerfile = r#"ARG BASE_IMAGE
1438FROM $BASE_IMAGE
1439
1440USER root
1441
1442ARG REMOTE_USER
1443ARG NEW_UID
1444ARG NEW_GID
1445SHELL ["/bin/sh", "-c"]
1446RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1447 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1448 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1449 if [ -z "$OLD_UID" ]; then \
1450 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1451 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1452 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1453 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1454 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1455 else \
1456 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1457 FREE_GID=65532; \
1458 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1459 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1460 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1461 fi; \
1462 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1463 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1464 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1465 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1466 fi; \
1467 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1468 fi;
1469
1470ARG IMAGE_USER
1471USER $IMAGE_USER
1472
1473# Ensure that /etc/profile does not clobber the existing path
1474RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1475"#.to_string();
1476 for feature in &self.features {
1477 let container_env_layer = feature.generate_dockerfile_env();
1478 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1479 }
1480
1481 if let Some(env) = &self.dev_container().container_env {
1482 for (key, value) in env {
1483 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1484 }
1485 }
1486 dockerfile
1487 }
1488
1489 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1490 let Some(features_build_info) = &self.features_build_info else {
1491 log::error!("Features build info not available for building feature content image");
1492 return Err(DevContainerError::DevContainerParseFailed);
1493 };
1494 let features_content_dir = &features_build_info.features_content_dir;
1495
1496 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1497 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1498
1499 self.fs
1500 .write(&dockerfile_path, dockerfile_content.as_bytes())
1501 .await
1502 .map_err(|e| {
1503 log::error!("Failed to write feature content Dockerfile: {e}");
1504 DevContainerError::FilesystemError
1505 })?;
1506
1507 let mut command = Command::new(self.docker_client.docker_cli());
1508 command.args([
1509 "build",
1510 "-t",
1511 "dev_container_feature_content_temp",
1512 "-f",
1513 &dockerfile_path.display().to_string(),
1514 &features_content_dir.display().to_string(),
1515 ]);
1516
1517 let output = self
1518 .command_runner
1519 .run_command(&mut command)
1520 .await
1521 .map_err(|e| {
1522 log::error!("Error building feature content image: {e}");
1523 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1524 })?;
1525
1526 if !output.status.success() {
1527 let stderr = String::from_utf8_lossy(&output.stderr);
1528 log::error!("Feature content image build failed: {stderr}");
1529 return Err(DevContainerError::CommandFailed(
1530 self.docker_client.docker_cli(),
1531 ));
1532 }
1533
1534 Ok(())
1535 }
1536
1537 fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1538 let dev_container = match &self.config {
1539 ConfigStatus::Deserialized(_) => {
1540 log::error!(
1541 "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1542 );
1543 return Err(DevContainerError::DevContainerParseFailed);
1544 }
1545 ConfigStatus::VariableParsed(dev_container) => dev_container,
1546 };
1547
1548 let Some(features_build_info) = &self.features_build_info else {
1549 log::error!(
1550 "Cannot create docker build command; features build info has not been constructed"
1551 );
1552 return Err(DevContainerError::DevContainerParseFailed);
1553 };
1554 let mut command = Command::new(self.docker_client.docker_cli());
1555
1556 command.args(["buildx", "build"]);
1557
1558 // --load is short for --output=docker, loading the built image into the local docker images
1559 command.arg("--load");
1560
1561 // BuildKit build context: provides the features content directory as a named context
1562 // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1563 command.args([
1564 "--build-context",
1565 &format!(
1566 "dev_containers_feature_content_source={}",
1567 features_build_info.features_content_dir.display()
1568 ),
1569 ]);
1570
1571 // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1572 if let Some(build_image) = &features_build_info.build_image {
1573 command.args([
1574 "--build-arg",
1575 &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1576 ]);
1577 } else {
1578 command.args([
1579 "--build-arg",
1580 "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1581 ]);
1582 }
1583
1584 command.args([
1585 "--build-arg",
1586 &format!(
1587 "_DEV_CONTAINERS_IMAGE_USER={}",
1588 self.root_image
1589 .as_ref()
1590 .and_then(|docker_image| docker_image.config.image_user.as_ref())
1591 .unwrap_or(&"root".to_string())
1592 ),
1593 ]);
1594
1595 command.args([
1596 "--build-arg",
1597 "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1598 ]);
1599
1600 if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1601 for (key, value) in args {
1602 command.args(["--build-arg", &format!("{}={}", key, value)]);
1603 }
1604 }
1605
1606 command.args(["--target", "dev_containers_target_stage"]);
1607
1608 command.args([
1609 "-f",
1610 &features_build_info.dockerfile_path.display().to_string(),
1611 ]);
1612
1613 command.args(["-t", &features_build_info.image_tag]);
1614
1615 if dev_container.build_type() == DevContainerBuildType::Dockerfile {
1616 command.arg(self.config_directory.display().to_string());
1617 } else {
1618 // Use an empty folder as the build context to avoid pulling in unneeded files.
1619 // The actual feature content is supplied via the BuildKit build context above.
1620 command.arg(features_build_info.empty_context_dir.display().to_string());
1621 }
1622
1623 Ok(command)
1624 }
1625
1626 async fn run_docker_compose(
1627 &self,
1628 resources: DockerComposeResources,
1629 ) -> Result<DockerInspect, DevContainerError> {
1630 let mut command = Command::new(self.docker_client.docker_cli());
1631 command.args(&["compose", "--project-name", &self.project_name()]);
1632 for docker_compose_file in resources.files {
1633 command.args(&["-f", &docker_compose_file.display().to_string()]);
1634 }
1635 command.args(&["up", "-d"]);
1636
1637 let output = self
1638 .command_runner
1639 .run_command(&mut command)
1640 .await
1641 .map_err(|e| {
1642 log::error!("Error running docker compose up: {e}");
1643 DevContainerError::CommandFailed(command.get_program().display().to_string())
1644 })?;
1645
1646 if !output.status.success() {
1647 let stderr = String::from_utf8_lossy(&output.stderr);
1648 log::error!("Non-success status from docker compose up: {}", stderr);
1649 return Err(DevContainerError::CommandFailed(
1650 command.get_program().display().to_string(),
1651 ));
1652 }
1653
1654 if let Some(docker_ps) = self.check_for_existing_container().await? {
1655 log::debug!("Found newly created dev container");
1656 return self.docker_client.inspect(&docker_ps.id).await;
1657 }
1658
1659 log::error!("Could not find existing container after docker compose up");
1660
1661 Err(DevContainerError::DevContainerParseFailed)
1662 }
1663
1664 async fn run_docker_image(
1665 &self,
1666 build_resources: DockerBuildResources,
1667 ) -> Result<DockerInspect, DevContainerError> {
1668 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1669
1670 let output = self
1671 .command_runner
1672 .run_command(&mut docker_run_command)
1673 .await
1674 .map_err(|e| {
1675 log::error!("Error running docker run: {e}");
1676 DevContainerError::CommandFailed(
1677 docker_run_command.get_program().display().to_string(),
1678 )
1679 })?;
1680
1681 if !output.status.success() {
1682 let std_err = String::from_utf8_lossy(&output.stderr);
1683 log::error!("Non-success status from docker run. StdErr: {std_err}");
1684 return Err(DevContainerError::CommandFailed(
1685 docker_run_command.get_program().display().to_string(),
1686 ));
1687 }
1688
1689 log::debug!("Checking for container that was started");
1690 let Some(docker_ps) = self.check_for_existing_container().await? else {
1691 log::error!("Could not locate container just created");
1692 return Err(DevContainerError::DevContainerParseFailed);
1693 };
1694 self.docker_client.inspect(&docker_ps.id).await
1695 }
1696
    /// The host-side project directory rendered as a display string.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1700 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1701 self.local_project_directory
1702 .file_name()
1703 .map(|f| f.display().to_string())
1704 .ok_or(DevContainerError::DevContainerParseFailed)
1705 }
1706
1707 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1708 self.dev_container()
1709 .workspace_folder
1710 .as_ref()
1711 .map(|folder| PathBuf::from(folder))
1712 .or(Some(
1713 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1714 ))
1715 .ok_or(DevContainerError::DevContainerParseFailed)
1716 }
1717 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1718 self.remote_workspace_folder().and_then(|f| {
1719 f.file_name()
1720 .map(|file_name| file_name.display().to_string())
1721 .ok_or(DevContainerError::DevContainerParseFailed)
1722 })
1723 }
1724
1725 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1726 if let Some(mount) = &self.dev_container().workspace_mount {
1727 return Ok(mount.clone());
1728 }
1729 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1730 return Err(DevContainerError::DevContainerParseFailed);
1731 };
1732
1733 Ok(MountDefinition {
1734 source: self.local_workspace_folder(),
1735 target: format!("/workspaces/{}", project_directory_name.display()),
1736 mount_type: None,
1737 })
1738 }
1739
    /// Assembles the `docker run` command for a non-compose container:
    /// detached, with the workspace and additional mounts, identifying and
    /// metadata labels, forwarded ports, and the entrypoint script piped
    /// through `/bin/sh -c`. CLI argument order is significant.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Podman needs SELinux labeling disabled and host-user namespace
        // mapping for bind mounts to be writable.
        if &docker_cli == "podman" {
            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
        }

        command.arg("--sig-proxy=false");
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to find this container again in `docker ps`.
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata label onto the container.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only numeric forwardPorts entries apply here; "service:port" strings
        // are compose-only and are skipped.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        if let Some(app_port) = &self.dev_container().app_port {
            command.arg("-p");
            command.arg(format!("{app_port}:{app_port}"));
        }

        // Run the entrypoint script via `/bin/sh -c <script> -`; everything
        // after the image id is passed to the container, not to docker.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1809
1810 fn extension_ids(&self) -> Vec<String> {
1811 self.dev_container()
1812 .customizations
1813 .as_ref()
1814 .map(|c| c.zed.extensions.clone())
1815 .unwrap_or_default()
1816 }
1817
    /// Full "up" pipeline: run host-side initialization, fetch feature and
    /// Dockerfile resources, build, start the container, then run the
    /// in-container lifecycle scripts for a newly created container.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        // initializeCommand runs on the host before anything is built.
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: fresh container, so the create-time scripts run too.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1831
    /// Runs the in-container lifecycle scripts via `docker exec`, in spec
    /// order: onCreate, updateContent, postCreate, postStart (new containers
    /// only), then postAttach (always). onCreate/updateContent run as root;
    /// the later phases run as the resolved remote user.
    ///
    /// NOTE(review): the devcontainers spec describes postStartCommand as
    /// running on every container start, but here it is gated behind
    /// `new_container` — confirm whether restarted containers should also get
    /// postStart.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // Create-time phases, root user.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // Post-create/post-start phases run as the remote user.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs on every attach, new container or not.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1919
1920 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1921 let ConfigStatus::VariableParsed(config) = &self.config else {
1922 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1923 return Err(DevContainerError::DevContainerParseFailed);
1924 };
1925
1926 if let Some(initialize_command) = &config.initialize_command {
1927 log::debug!("Running initialize command");
1928 initialize_command
1929 .run(&self.command_runner, &self.local_project_directory)
1930 .await
1931 } else {
1932 log::warn!("No initialize command found");
1933 Ok(())
1934 }
1935 }
1936
1937 async fn check_for_existing_devcontainer(
1938 &self,
1939 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1940 if let Some(docker_ps) = self.check_for_existing_container().await? {
1941 log::debug!("Dev container already found. Proceeding with it");
1942
1943 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1944
1945 if !docker_inspect.is_running() {
1946 log::debug!("Container not running. Will attempt to start, and then proceed");
1947 self.docker_client.start_container(&docker_ps.id).await?;
1948 }
1949
1950 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1951
1952 let remote_folder = get_remote_dir_from_config(
1953 &docker_inspect,
1954 (&self.local_project_directory.display()).to_string(),
1955 )?;
1956
1957 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1958
1959 let dev_container_up = DevContainerUp {
1960 container_id: docker_ps.id,
1961 remote_user: remote_user,
1962 remote_workspace_folder: remote_folder,
1963 extension_ids: self.extension_ids(),
1964 remote_env,
1965 };
1966
1967 self.run_remote_scripts(&dev_container_up, false).await?;
1968
1969 Ok(Some(dev_container_up))
1970 } else {
1971 log::debug!("Existing container not found.");
1972
1973 Ok(None)
1974 }
1975 }
1976
1977 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1978 self.docker_client
1979 .find_process_by_filters(
1980 self.identifying_labels()
1981 .iter()
1982 .map(|(k, v)| format!("label={k}={v}"))
1983 .collect(),
1984 )
1985 .await
1986 }
1987
1988 fn project_name(&self) -> String {
1989 if let Some(name) = &self.dev_container().name {
1990 safe_id_lower(name)
1991 } else {
1992 let alternate_name = &self
1993 .local_workspace_base_name()
1994 .unwrap_or(self.local_workspace_folder());
1995 safe_id_lower(alternate_name)
1996 }
1997 }
1998}
1999
2000/// Holds all the information needed to construct a `docker buildx build` command
2001/// that extends a base image with dev container features.
2002///
2003/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
2004/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    /// (the real inputs come in via the Dockerfile and named build contexts).
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` when the base comes from elsewhere (e.g. a user Dockerfile build).
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2018
2019pub(crate) async fn read_devcontainer_configuration(
2020 config: DevContainerConfig,
2021 context: &DevContainerContext,
2022 environment: HashMap<String, String>,
2023) -> Result<DevContainer, DevContainerError> {
2024 let docker = if context.use_podman {
2025 Docker::new("podman")
2026 } else {
2027 Docker::new("docker")
2028 };
2029 let mut dev_container = DevContainerManifest::new(
2030 context,
2031 environment,
2032 Arc::new(docker),
2033 Arc::new(DefaultCommandRunner::new()),
2034 config,
2035 &context.project_directory.as_ref(),
2036 )
2037 .await?;
2038 dev_container.parse_nonremote_vars()?;
2039 Ok(dev_container.dev_container().clone())
2040}
2041
2042pub(crate) async fn spawn_dev_container(
2043 context: &DevContainerContext,
2044 environment: HashMap<String, String>,
2045 config: DevContainerConfig,
2046 local_project_path: &Path,
2047) -> Result<DevContainerUp, DevContainerError> {
2048 let docker = if context.use_podman {
2049 Docker::new("podman")
2050 } else {
2051 Docker::new("docker")
2052 };
2053 let mut devcontainer_manifest = DevContainerManifest::new(
2054 context,
2055 environment,
2056 Arc::new(docker),
2057 Arc::new(DefaultCommandRunner::new()),
2058 config,
2059 local_project_path,
2060 )
2061 .await?;
2062
2063 devcontainer_manifest.parse_nonremote_vars()?;
2064
2065 log::debug!("Checking for existing container");
2066 if let Some(devcontainer) = devcontainer_manifest
2067 .check_for_existing_devcontainer()
2068 .await?
2069 {
2070 Ok(devcontainer)
2071 } else {
2072 log::debug!("Existing container not found. Building");
2073
2074 devcontainer_manifest.build_and_run().await
2075 }
2076}
2077
/// Inputs for starting a single-container (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be started from.
    image: DockerInspect,
    /// Mounts beyond the workspace mount itself.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should run privileged — presumably forwarded
    /// as a `docker run` flag; TODO confirm at the usage site.
    privileged: bool,
    /// Shell snippet passed to the container's `/bin/sh -c` entrypoint to
    /// keep it alive (see `create_docker_run_command`'s expected args).
    entrypoint_script: String,
}
2085
/// Build inputs, depending on whether the dev container is defined via a
/// Docker Compose project or a plain image/Dockerfile.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2091
2092fn find_primary_service(
2093 docker_compose: &DockerComposeResources,
2094 devcontainer: &DevContainerManifest,
2095) -> Result<(String, DockerComposeService), DevContainerError> {
2096 let Some(service_name) = &devcontainer.dev_container().service else {
2097 return Err(DevContainerError::DevContainerParseFailed);
2098 };
2099
2100 match docker_compose.config.services.get(service_name) {
2101 Some(service) => Ok((service_name.clone(), service.clone())),
2102 None => Err(DevContainerError::DevContainerParseFailed),
2103 }
2104}
2105
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): presumably consumed when generating Dockerfile.extended — confirm usage site.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2109
/// Escapes regex (ERE) metacharacters by prefixing each with a backslash;
/// all other characters pass through unchanged.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .flat_map(|ch| {
            // Emit an optional escaping backslash, then the character itself.
            let special = ".*+?^${}()|[]\\".contains(ch);
            special
                .then_some('\\')
                .into_iter()
                .chain(std::iter::once(ch))
        })
        .collect()
}
2121
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Strip an OCI digest ("@sha256:…") if present; otherwise strip a
    // version tag (":1"), but only when the colon comes after the last
    // slash so registry ports ("host:5000/…") are left alone.
    let trimmed = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short ID is the final path segment.
    trimmed.rsplit('/').next().unwrap_or(trimmed)
}
2145
2146/// Generates a shell command that looks up a user's passwd entry.
2147///
2148/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2149/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2150fn get_ent_passwd_shell_command(user: &str) -> String {
2151 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2152 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2153 format!(
2154 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2155 shell = escaped_for_shell,
2156 re = escaped_for_regex,
2157 )
2158}
2159
2160/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2161///
2162/// Features listed in the override come first (in the specified order), followed
2163/// by any remaining features sorted lexicographically by their full reference ID.
2164fn resolve_feature_order<'a>(
2165 features: &'a HashMap<String, FeatureOptions>,
2166 override_order: &Option<Vec<String>>,
2167) -> Vec<(&'a String, &'a FeatureOptions)> {
2168 if let Some(order) = override_order {
2169 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2170 for ordered_id in order {
2171 if let Some((key, options)) = features.get_key_value(ordered_id) {
2172 ordered.push((key, options));
2173 }
2174 }
2175 let mut remaining: Vec<_> = features
2176 .iter()
2177 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2178 .collect();
2179 remaining.sort_by_key(|(id, _)| id.as_str());
2180 ordered.extend(remaining);
2181 ordered
2182 } else {
2183 let mut entries: Vec<_> = features.iter().collect();
2184 entries.sort_by_key(|(id, _)| id.as_str());
2185 entries
2186 }
2187}
2188
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `feature_ref` is the full reference (printed as the Id), `feature_id` the
/// short name, and `env_variables` the rendered option lines echoed in the
/// banner. Returns `DevContainerParseFailed` when any value cannot be
/// shell-quoted.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // All three interpolated values are shell-quoted so the generated
    // script stays valid regardless of their content.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line so the banner reads as a block.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // The script traps EXIT to report which feature failed, sources the
    // builtin + per-feature env files with auto-export (`set -a`), then
    // runs the feature's install.sh.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2247
// Dockerfile actions need to be moved to their own file
/// Returns the build-stage alias declared on the first `FROM` line
/// (`FROM <image> AS <alias>`), or `None` when the Dockerfile has no
/// `FROM` line or the first `FROM` line carries no alias.
///
/// The `AS` keyword is matched case-insensitively, as Docker does.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))
        .and_then(|line| {
            // Split on whitespace runs: `split(" ")` produced empty tokens
            // for repeated spaces/tabs, which made `FROM img AS  alias`
            // fail to be recognized.
            let words: Vec<&str> = line.split_whitespace().collect();
            match words.as_slice() {
                [_, .., keyword, alias] if keyword.eq_ignore_ascii_case("as") => {
                    Some((*alias).to_string())
                }
                _ => None,
            }
        })
}
2262
2263fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2264 if dockerfile_alias(dockerfile_content).is_some() {
2265 dockerfile_content.to_string()
2266 } else {
2267 dockerfile_content
2268 .lines()
2269 .map(|line| {
2270 if line.starts_with("FROM") {
2271 format!("{} AS {}", line, alias)
2272 } else {
2273 line.to_string()
2274 }
2275 })
2276 .collect::<Vec<String>>()
2277 .join("\n")
2278 }
2279}
2280
2281fn image_from_dockerfile(
2282 devcontainer: &DevContainerManifest,
2283 dockerfile_contents: String,
2284) -> Result<String, DevContainerError> {
2285 let mut raw_contents = dockerfile_contents
2286 .lines()
2287 .find(|line| line.starts_with("FROM"))
2288 .and_then(|from_line| {
2289 from_line
2290 .split(' ')
2291 .collect::<Vec<&str>>()
2292 .get(1)
2293 .map(|s| s.to_string())
2294 })
2295 .ok_or_else(|| {
2296 log::error!("Could not find an image definition in dockerfile");
2297 DevContainerError::DevContainerParseFailed
2298 })?;
2299
2300 for (k, v) in devcontainer
2301 .dev_container()
2302 .build
2303 .as_ref()
2304 .and_then(|b| b.args.as_ref())
2305 .unwrap_or(&HashMap::new())
2306 {
2307 raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2308 }
2309 Ok(raw_contents)
2310}
2311
2312// Container user things
2313// This should come from spec - see the docs
2314fn get_remote_user_from_config(
2315 docker_config: &DockerInspect,
2316 devcontainer: &DevContainerManifest,
2317) -> Result<String, DevContainerError> {
2318 if let DevContainer {
2319 remote_user: Some(user),
2320 ..
2321 } = &devcontainer.dev_container()
2322 {
2323 return Ok(user.clone());
2324 }
2325 if let Some(metadata) = &docker_config.config.labels.metadata {
2326 for metadatum in metadata {
2327 if let Some(remote_user) = metadatum.get("remoteUser") {
2328 if let Some(remote_user_str) = remote_user.as_str() {
2329 return Ok(remote_user_str.to_string());
2330 }
2331 }
2332 }
2333 }
2334 if let Some(image_user) = &docker_config.config.image_user {
2335 if !image_user.is_empty() {
2336 return Ok(image_user.to_string());
2337 }
2338 }
2339 Ok("root".to_string())
2340}
2341
2342// This should come from spec - see the docs
2343fn get_container_user_from_config(
2344 docker_config: &DockerInspect,
2345 devcontainer: &DevContainerManifest,
2346) -> Result<String, DevContainerError> {
2347 if let Some(user) = &devcontainer.dev_container().container_user {
2348 return Ok(user.to_string());
2349 }
2350 if let Some(metadata) = &docker_config.config.labels.metadata {
2351 for metadatum in metadata {
2352 if let Some(container_user) = metadatum.get("containerUser") {
2353 if let Some(container_user_str) = container_user.as_str() {
2354 return Ok(container_user_str.to_string());
2355 }
2356 }
2357 }
2358 }
2359 if let Some(image_user) = &docker_config.config.image_user {
2360 return Ok(image_user.to_string());
2361 }
2362
2363 Ok("root".to_string())
2364}
2365
2366#[cfg(test)]
2367mod test {
2368 use std::{
2369 collections::HashMap,
2370 ffi::OsStr,
2371 path::PathBuf,
2372 process::{ExitStatus, Output},
2373 sync::{Arc, Mutex},
2374 };
2375
2376 use async_trait::async_trait;
2377 use fs::{FakeFs, Fs};
2378 use gpui::{AppContext, TestAppContext};
2379 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2380 use project::{
2381 ProjectEnvironment,
2382 worktree_store::{WorktreeIdCounter, WorktreeStore},
2383 };
2384 use serde_json_lenient::Value;
2385 use util::{command::Command, paths::SanitizedPath};
2386
2387 use crate::{
2388 DevContainerConfig, DevContainerContext,
2389 command_json::CommandRunner,
2390 devcontainer_api::DevContainerError,
2391 devcontainer_json::MountDefinition,
2392 devcontainer_manifest::{
2393 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2394 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2395 },
2396 docker::{
2397 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2398 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2399 DockerPs,
2400 },
2401 oci::TokenResponse,
2402 };
    /// Absolute path used as the fake local project root by all tests below.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2404
    /// Builds an in-memory GNU tar archive from `(path, contents)` pairs.
    ///
    /// An entry with empty contents is written as a directory entry;
    /// anything else becomes a regular file. All entries get mode 0o755.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Directory entry: zero length, explicit Directory type.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                // Regular file entry sized to its byte content.
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finish the archive and return the raw tar bytes.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2435
2436 fn test_project_filename() -> String {
2437 PathBuf::from(TEST_PROJECT_PATH)
2438 .file_name()
2439 .expect("is valid")
2440 .display()
2441 .to_string()
2442 }
2443
2444 async fn init_devcontainer_config(
2445 fs: &Arc<FakeFs>,
2446 devcontainer_contents: &str,
2447 ) -> DevContainerConfig {
2448 fs.insert_tree(
2449 format!("{TEST_PROJECT_PATH}/.devcontainer"),
2450 serde_json::json!({"devcontainer.json": devcontainer_contents}),
2451 )
2452 .await;
2453
2454 DevContainerConfig::default_config()
2455 }
2456
    /// Fakes handed back to a test so it can inspect or drive side effects
    /// after the manifest has been constructed.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Kept alive for the test's duration; most tests don't touch it.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2463
2464 async fn init_default_devcontainer_manifest(
2465 cx: &mut TestAppContext,
2466 devcontainer_contents: &str,
2467 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2468 let fs = FakeFs::new(cx.executor());
2469 let http_client = fake_http_client();
2470 let command_runner = Arc::new(TestCommandRunner::new());
2471 let docker = Arc::new(FakeDocker::new());
2472 let environment = HashMap::new();
2473
2474 init_devcontainer_manifest(
2475 cx,
2476 fs,
2477 http_client,
2478 docker,
2479 command_runner,
2480 environment,
2481 devcontainer_contents,
2482 )
2483 .await
2484 }
2485
    /// Builds a [`DevContainerManifest`] against fully faked dependencies,
    /// returning both the fakes (for later inspection) and the manifest.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Seed the fake fs with the devcontainer.json under the test project.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Minimal worktree/environment entities required by the context.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Hand the same fakes back so tests can assert against them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2528
    /// An explicit `remoteUser` in devcontainer.json must take precedence
    /// over a `remoteUser` advertised via the image's metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Image metadata claims a different user ("vsCode")…
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        // …but the devcontainer.json value wins.
        assert_eq!(remote_user, "root".to_string())
    }
2568
    /// With no `remoteUser` in devcontainer.json, the user comes from the
    /// image's metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        // The metadata-provided user is used as-is.
        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2597
    /// Covers tag (`:1`), untagged, digest (`@sha256:…`), and local-path
    /// forms of feature references.
    #[test]
    fn should_extract_feature_id_from_references() {
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
            "aws-cli"
        );
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/go"),
            "go"
        );
        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
            "rust"
        );
    }
2615
    /// Verifies the exact argv of the `docker run` command built for a plain
    /// (non-compose) container: detach + sig-proxy flags, workspace bind
    /// mount, identifying labels, and the keep-alive entrypoint script.
    #[gpui::test]
    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
        // NOTE(review): this metadata map is built but never used below —
        // presumably leftover setup; confirm and remove if so.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );

        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let build_resources = DockerBuildResources {
            image: DockerInspect {
                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
                config: DockerInspectConfig {
                    labels: DockerConfigLabels { metadata: None },
                    image_user: None,
                    env: Vec::new(),
                },
                mounts: None,
                state: None,
            },
            additional_mounts: vec![],
            privileged: false,
            entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
        };
        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);

        assert!(docker_run_command.is_ok());
        let docker_run_command = docker_run_command.expect("ok");

        assert_eq!(docker_run_command.get_program(), "docker");
        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("devcontainer.json");
        let expected_config_file_label = expected_config_file_label.display();
        assert_eq!(
            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
            vec![
                OsStr::new("run"),
                OsStr::new("--sig-proxy=false"),
                OsStr::new("-d"),
                OsStr::new("--mount"),
                OsStr::new(
                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
                ),
                OsStr::new("-l"),
                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
                OsStr::new("-l"),
                OsStr::new(&format!(
                    "devcontainer.config_file={expected_config_file_label}"
                )),
                OsStr::new("--entrypoint"),
                OsStr::new("/bin/sh"),
                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
                OsStr::new("-c"),
                // Expected `-c` payload equals the entrypoint script above
                // once the surrounding whitespace is trimmed.
                OsStr::new(
                    "
echo Container started
 trap \"exit 0\" 15
 exec \"$@\"
 while sleep 1 & wait $!; do :; done
"
                    .trim()
                ),
                OsStr::new("-"),
            ]
        )
    }
2684
    /// Three states of primary-service resolution: no `service` field at
    /// all, a `service` missing from the compose config, and a `service`
    /// that resolves successfully.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2744
    /// Exercises every non-remote variable substitution (`${devcontainerId}`,
    /// workspace-folder variants, and `${localEnv:…}`) when no explicit
    /// `workspaceMount`/`workspaceFolder` is set, so the default
    /// `/workspaces/<basename>` mount applies.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
            "#;
        // Supply a local environment so `${localEnv:…}` has values to read.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder} — default mount lands under /workspaces.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2857
    /// Same substitutions as the default-mount test, but with an explicit
    /// `workspaceMount`/`workspaceFolder`, so container-side variables must
    /// resolve against `/workspace/customfolder` instead of the default.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
    // These are some external comments. serde_lenient should handle them
    {
        // These are some internal comments
        "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
        "name": "myDevContainer-${devcontainerId}",
        "remoteUser": "root",
        "remoteEnv": {
            "DEVCONTAINER_ID": "${devcontainerId}",
            "MYVAR2": "myvarothervalue",
            "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
            "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
            "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
            "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

        },
        "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
        "workspaceFolder": "/workspace/customfolder"
    }
            "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — from explicit workspaceFolder.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename} — still the local project dir name.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2944
2945 // updateRemoteUserUID is treated as false in Windows, so this test will fail
2946 // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2947 #[cfg(not(target_os = "windows"))]
2948 #[gpui::test]
2949 async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2950 cx.executor().allow_parking();
2951 env_logger::try_init().ok();
2952 let given_devcontainer_contents = r#"
2953 /*---------------------------------------------------------------------------------------------
2954 * Copyright (c) Microsoft Corporation. All rights reserved.
2955 * Licensed under the MIT License. See License.txt in the project root for license information.
2956 *--------------------------------------------------------------------------------------------*/
2957 {
2958 "name": "cli-${devcontainerId}",
2959 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2960 "build": {
2961 "dockerfile": "Dockerfile",
2962 "args": {
2963 "VARIANT": "18-bookworm",
2964 "FOO": "bar",
2965 },
2966 },
2967 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2968 "workspaceFolder": "/workspace2",
2969 "mounts": [
2970 // Keep command history across instances
2971 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2972 ],
2973
2974 "forwardPorts": [
2975 8082,
2976 8083,
2977 ],
2978 "appPort": "8084",
2979
2980 "containerEnv": {
2981 "VARIABLE_VALUE": "value",
2982 },
2983
2984 "initializeCommand": "touch IAM.md",
2985
2986 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
2987
2988 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
2989
2990 "postCreateCommand": {
2991 "yarn": "yarn install",
2992 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2993 },
2994
2995 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2996
2997 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
2998
2999 "remoteUser": "node",
3000
3001 "remoteEnv": {
3002 "PATH": "${containerEnv:PATH}:/some/other/path",
3003 "OTHER_ENV": "other_env_value"
3004 },
3005
3006 "features": {
3007 "ghcr.io/devcontainers/features/docker-in-docker:2": {
3008 "moby": false,
3009 },
3010 "ghcr.io/devcontainers/features/go:1": {},
3011 },
3012
3013 "customizations": {
3014 "vscode": {
3015 "extensions": [
3016 "dbaeumer.vscode-eslint",
3017 "GitHub.vscode-pull-request-github",
3018 ],
3019 },
3020 "zed": {
3021 "extensions": ["vue", "ruby"],
3022 },
3023 "codespaces": {
3024 "repositories": {
3025 "devcontainers/features": {
3026 "permissions": {
3027 "contents": "write",
3028 "workflows": "write",
3029 },
3030 },
3031 },
3032 },
3033 },
3034 }
3035 "#;
3036
3037 let (test_dependencies, mut devcontainer_manifest) =
3038 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3039 .await
3040 .unwrap();
3041
3042 test_dependencies
3043 .fs
3044 .atomic_write(
3045 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3046 r#"
3047# Copyright (c) Microsoft Corporation. All rights reserved.
3048# Licensed under the MIT License. See License.txt in the project root for license information.
3049ARG VARIANT="16-bullseye"
3050FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3051
3052RUN mkdir -p /workspaces && chown node:node /workspaces
3053
3054ARG USERNAME=node
3055USER $USERNAME
3056
3057# Save command line history
3058RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3059&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3060&& mkdir -p /home/$USERNAME/commandhistory \
3061&& touch /home/$USERNAME/commandhistory/.bash_history \
3062&& chown -R $USERNAME /home/$USERNAME/commandhistory
3063 "#.trim().to_string(),
3064 )
3065 .await
3066 .unwrap();
3067
3068 devcontainer_manifest.parse_nonremote_vars().unwrap();
3069
3070 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3071
3072 assert_eq!(
3073 devcontainer_up.extension_ids,
3074 vec!["vue".to_string(), "ruby".to_string()]
3075 );
3076
3077 let files = test_dependencies.fs.files();
3078 let feature_dockerfile = files
3079 .iter()
3080 .find(|f| {
3081 f.file_name()
3082 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3083 })
3084 .expect("to be found");
3085 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3086 assert_eq!(
3087 &feature_dockerfile,
3088 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3089
3090# Copyright (c) Microsoft Corporation. All rights reserved.
3091# Licensed under the MIT License. See License.txt in the project root for license information.
3092ARG VARIANT="16-bullseye"
3093FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3094
3095RUN mkdir -p /workspaces && chown node:node /workspaces
3096
3097ARG USERNAME=node
3098USER $USERNAME
3099
3100# Save command line history
3101RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3102&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3103&& mkdir -p /home/$USERNAME/commandhistory \
3104&& touch /home/$USERNAME/commandhistory/.bash_history \
3105&& chown -R $USERNAME /home/$USERNAME/commandhistory
3106
3107FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3108USER root
3109COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3110RUN chmod -R 0755 /tmp/build-features/
3111
3112FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3113
3114USER root
3115
3116RUN mkdir -p /tmp/dev-container-features
3117COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3118
3119RUN \
3120echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3121echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3122
3123
3124RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3125cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3126&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3127&& cd /tmp/dev-container-features/docker-in-docker_0 \
3128&& chmod +x ./devcontainer-features-install.sh \
3129&& ./devcontainer-features-install.sh \
3130&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3131
3132RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3133cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3134&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3135&& cd /tmp/dev-container-features/go_1 \
3136&& chmod +x ./devcontainer-features-install.sh \
3137&& ./devcontainer-features-install.sh \
3138&& rm -rf /tmp/dev-container-features/go_1
3139
3140
3141ARG _DEV_CONTAINERS_IMAGE_USER=root
3142USER $_DEV_CONTAINERS_IMAGE_USER
3143"#
3144 );
3145
3146 let uid_dockerfile = files
3147 .iter()
3148 .find(|f| {
3149 f.file_name()
3150 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3151 })
3152 .expect("to be found");
3153 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3154
3155 assert_eq!(
3156 &uid_dockerfile,
3157 r#"ARG BASE_IMAGE
3158FROM $BASE_IMAGE
3159
3160USER root
3161
3162ARG REMOTE_USER
3163ARG NEW_UID
3164ARG NEW_GID
3165SHELL ["/bin/sh", "-c"]
3166RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3167 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3168 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3169 if [ -z "$OLD_UID" ]; then \
3170 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3171 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3172 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3173 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3174 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3175 else \
3176 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3177 FREE_GID=65532; \
3178 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3179 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3180 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3181 fi; \
3182 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3183 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3184 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3185 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3186 fi; \
3187 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3188 fi;
3189
3190ARG IMAGE_USER
3191USER $IMAGE_USER
3192
3193# Ensure that /etc/profile does not clobber the existing path
3194RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3195
3196ENV DOCKER_BUILDKIT=1
3197
3198ENV GOPATH=/go
3199ENV GOROOT=/usr/local/go
3200ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3201ENV VARIABLE_VALUE=value
3202"#
3203 );
3204
3205 let golang_install_wrapper = files
3206 .iter()
3207 .find(|f| {
3208 f.file_name()
3209 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3210 && f.to_str().is_some_and(|s| s.contains("/go_"))
3211 })
3212 .expect("to be found");
3213 let golang_install_wrapper = test_dependencies
3214 .fs
3215 .load(golang_install_wrapper)
3216 .await
3217 .unwrap();
3218 assert_eq!(
3219 &golang_install_wrapper,
3220 r#"#!/bin/sh
3221set -e
3222
3223on_exit () {
3224 [ $? -eq 0 ] && exit
3225 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3226}
3227
3228trap on_exit EXIT
3229
3230echo ===========================================================================
3231echo 'Feature : go'
3232echo 'Id : ghcr.io/devcontainers/features/go:1'
3233echo 'Options :'
3234echo ' GOLANGCILINTVERSION=latest
3235 VERSION=latest'
3236echo ===========================================================================
3237
3238set -a
3239. ../devcontainer-features.builtin.env
3240. ./devcontainer-features.env
3241set +a
3242
3243chmod +x ./install.sh
3244./install.sh
3245"#
3246 );
3247
3248 let docker_commands = test_dependencies
3249 .command_runner
3250 .commands_by_program("docker");
3251
3252 let docker_run_command = docker_commands
3253 .iter()
3254 .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3255 .expect("found");
3256
3257 assert_eq!(
3258 docker_run_command.args,
3259 vec![
3260 "run".to_string(),
3261 "--privileged".to_string(),
3262 "--sig-proxy=false".to_string(),
3263 "-d".to_string(),
3264 "--mount".to_string(),
3265 "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3266 "--mount".to_string(),
3267 "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3268 "--mount".to_string(),
3269 "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3270 "-l".to_string(),
3271 "devcontainer.local_folder=/path/to/local/project".to_string(),
3272 "-l".to_string(),
3273 "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3274 "-l".to_string(),
3275 "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3276 "-p".to_string(),
3277 "8082:8082".to_string(),
3278 "-p".to_string(),
3279 "8083:8083".to_string(),
3280 "-p".to_string(),
3281 "8084:8084".to_string(),
3282 "--entrypoint".to_string(),
3283 "/bin/sh".to_string(),
3284 "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3285 "-c".to_string(),
3286 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3287 "-".to_string()
3288 ]
3289 );
3290
3291 let docker_exec_commands = test_dependencies
3292 .docker
3293 .exec_commands_recorded
3294 .lock()
3295 .unwrap();
3296
3297 assert!(docker_exec_commands.iter().all(|exec| {
3298 exec.env
3299 == HashMap::from([
3300 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3301 (
3302 "PATH".to_string(),
3303 "/initial/path:/some/other/path".to_string(),
3304 ),
3305 ])
3306 }))
3307 }
3308
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end test of the docker-compose code path: a devcontainer.json that
        // selects a compose file/service ("dockerComposeFile"/"service") and two OCI
        // features. Verifies the generated artifacts written to the fake fs:
        //   1. "Dockerfile.extended"        — feature-install build stages
        //   2. "updateUID.Dockerfile"       — UID/GID remap stage (updateRemoteUserUID
        //                                     defaults to true on non-Windows)
        //   3. "docker_compose_runtime.json"— compose override with entrypoint,
        //                                     labels, mounts, and forwarded ports
        use crate::docker::DockerComposeServicePort;

        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: JSONC (comments + trailing commas), two
        // features, compose service "app", forwarded ports — one plain (8083) and
        // two targeting the "db" service — plus an appPort.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture with two services: "app" (built from the Dockerfile
        // below, network_mode: service:db) and "db" (postgres image).
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Base Dockerfile referenced by the compose "app" service build.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
    "#.trim().to_string()).await.unwrap();

        // Resolve ${localWorkspaceFolderBasename} etc. before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Drive the full build + up flow against the fakes.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // 1. The feature-install Dockerfile: base Dockerfile stage, content
        // normalize/target stages, then one RUN per feature in declaration order
        // (aws-cli_0, docker-in-docker_1).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // 2. The UID-remap Dockerfile (generated because updateRemoteUserUID is
        // not disabled). Remaps the remote user's UID/GID, reassigning a
        // conflicting group to a free GID if needed, then restores the image
        // user and appends the PATH-preserving /etc/profile fix and ENV lines.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // 3. The compose runtime override. "app" gets the keep-alive entrypoint,
        // capabilities, labels, and the dind volume mount; the ports land on "db"
        // because "app" uses network_mode: service:db, and the two "db:"-prefixed
        // forwardPorts plus 8083 and appPort 8084 are all published 1:1.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "8084".to_string(),
                                published: "8084".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        // Compare via the deserialized struct rather than raw JSON so the test
        // is insensitive to key ordering/formatting of the written file.
        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3616
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        // Same compose + features scenario as the test above, but with
        // "updateRemoteUserUID": false. In that case no separate
        // "updateUID.Dockerfile" stage is asserted; instead the PATH-preserving
        // /etc/profile fix and the ENV lines are appended directly to the end of
        // the generated "Dockerfile.extended". Runs on all platforms (no
        // #[cfg(not(windows))] guard) since the UID remap is skipped either way.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: identical to the previous test except for
        // the added "updateRemoteUserUID": false entry.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "updateRemoteUserUID": false,
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture: "app" built from the Dockerfile below, "db" runs the
        // postgres image; "app" shares "db"'s network via network_mode.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
 build:
   context: .
   dockerfile: Dockerfile
 env_file:
   # Ensure that the variables in .env match the same variables in devcontainer.json
   - .env

 volumes:
   - ../..:/workspaces:cached

 # Overrides default command so things don't shut down after the process ends.
 command: sleep infinity

 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
 network_mode: service:db

 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
 image: postgres:14.1
 restart: unless-stopped
 volumes:
   - postgres-data:/var/lib/postgresql/data
 env_file:
   # Ensure that the variables in .env match the same variables in devcontainer.json
   - .env

 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)
    "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Base Dockerfile referenced by the compose "app" service build.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
    "#.trim().to_string()).await.unwrap();

        // Resolve ${localWorkspaceFolderBasename} etc. before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Drive the full build + up flow against the fakes.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // With the UID remap disabled, Dockerfile.extended carries the full
        // pipeline AND the trailing /etc/profile PATH fix + ENV DOCKER_BUILDKIT
        // lines that would otherwise live in updateUID.Dockerfile.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
3792
3793 #[cfg(not(target_os = "windows"))]
3794 #[gpui::test]
3795 async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
3796 cx.executor().allow_parking();
3797 env_logger::try_init().ok();
3798 let given_devcontainer_contents = r#"
3799 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3800 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3801 {
3802 "features": {
3803 "ghcr.io/devcontainers/features/aws-cli:1": {},
3804 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3805 },
3806 "name": "Rust and PostgreSQL",
3807 "dockerComposeFile": "docker-compose.yml",
3808 "service": "app",
3809 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3810
3811 // Features to add to the dev container. More info: https://containers.dev/features.
3812 // "features": {},
3813
3814 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3815 // "forwardPorts": [5432],
3816
3817 // Use 'postCreateCommand' to run commands after the container is created.
3818 // "postCreateCommand": "rustc --version",
3819
3820 // Configure tool-specific properties.
3821 // "customizations": {},
3822
3823 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3824 // "remoteUser": "root"
3825 }
3826 "#;
3827 let mut fake_docker = FakeDocker::new();
3828 fake_docker.set_podman(true);
3829 let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
3830 cx,
3831 FakeFs::new(cx.executor()),
3832 fake_http_client(),
3833 Arc::new(fake_docker),
3834 Arc::new(TestCommandRunner::new()),
3835 HashMap::new(),
3836 given_devcontainer_contents,
3837 )
3838 .await
3839 .unwrap();
3840
3841 test_dependencies
3842 .fs
3843 .atomic_write(
3844 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3845 r#"
3846version: '3.8'
3847
3848volumes:
3849postgres-data:
3850
3851services:
3852app:
3853build:
3854 context: .
3855 dockerfile: Dockerfile
3856env_file:
3857 # Ensure that the variables in .env match the same variables in devcontainer.json
3858 - .env
3859
3860volumes:
3861 - ../..:/workspaces:cached
3862
3863# Overrides default command so things don't shut down after the process ends.
3864command: sleep infinity
3865
3866# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3867network_mode: service:db
3868
3869# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3870# (Adding the "ports" property to this file will not forward from a Codespace.)
3871
3872db:
3873image: postgres:14.1
3874restart: unless-stopped
3875volumes:
3876 - postgres-data:/var/lib/postgresql/data
3877env_file:
3878 # Ensure that the variables in .env match the same variables in devcontainer.json
3879 - .env
3880
3881# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3882# (Adding the "ports" property to this file will not forward from a Codespace.)
3883 "#.trim().to_string(),
3884 )
3885 .await
3886 .unwrap();
3887
3888 test_dependencies.fs.atomic_write(
3889 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3890 r#"
3891FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3892
3893# Include lld linker to improve build times either by using environment variable
3894# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3895RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3896&& apt-get -y install clang lld \
3897&& apt-get autoremove -y && apt-get clean -y
3898 "#.trim().to_string()).await.unwrap();
3899
3900 devcontainer_manifest.parse_nonremote_vars().unwrap();
3901
3902 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3903
3904 let files = test_dependencies.fs.files();
3905
3906 let feature_dockerfile = files
3907 .iter()
3908 .find(|f| {
3909 f.file_name()
3910 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3911 })
3912 .expect("to be found");
3913 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3914 assert_eq!(
3915 &feature_dockerfile,
3916 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3917
3918FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3919
3920# Include lld linker to improve build times either by using environment variable
3921# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3922RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3923&& apt-get -y install clang lld \
3924&& apt-get autoremove -y && apt-get clean -y
3925
3926FROM dev_container_feature_content_temp as dev_containers_feature_content_source
3927
3928FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3929USER root
3930COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
3931RUN chmod -R 0755 /tmp/build-features/
3932
3933FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3934
3935USER root
3936
3937RUN mkdir -p /tmp/dev-container-features
3938COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3939
3940RUN \
3941echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3942echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3943
3944
3945COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
3946RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3947&& cd /tmp/dev-container-features/aws-cli_0 \
3948&& chmod +x ./devcontainer-features-install.sh \
3949&& ./devcontainer-features-install.sh
3950
3951COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
3952RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3953&& cd /tmp/dev-container-features/docker-in-docker_1 \
3954&& chmod +x ./devcontainer-features-install.sh \
3955&& ./devcontainer-features-install.sh
3956
3957
3958ARG _DEV_CONTAINERS_IMAGE_USER=root
3959USER $_DEV_CONTAINERS_IMAGE_USER
3960"#
3961 );
3962
3963 let uid_dockerfile = files
3964 .iter()
3965 .find(|f| {
3966 f.file_name()
3967 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3968 })
3969 .expect("to be found");
3970 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3971
3972 assert_eq!(
3973 &uid_dockerfile,
3974 r#"ARG BASE_IMAGE
3975FROM $BASE_IMAGE
3976
3977USER root
3978
3979ARG REMOTE_USER
3980ARG NEW_UID
3981ARG NEW_GID
3982SHELL ["/bin/sh", "-c"]
3983RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3984 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3985 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3986 if [ -z "$OLD_UID" ]; then \
3987 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3988 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3989 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3990 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3991 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3992 else \
3993 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3994 FREE_GID=65532; \
3995 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3996 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3997 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3998 fi; \
3999 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4000 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4001 if [ "$OLD_GID" != "$NEW_GID" ]; then \
4002 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4003 fi; \
4004 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4005 fi;
4006
4007ARG IMAGE_USER
4008USER $IMAGE_USER
4009
4010# Ensure that /etc/profile does not clobber the existing path
4011RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4012
4013
4014ENV DOCKER_BUILDKIT=1
4015"#
4016 );
4017 }
4018
    /// End-to-end test of a Dockerfile-based devcontainer build with
    /// `updateRemoteUserUID: false`: features are baked into the generated
    /// `Dockerfile.extended`, zed customizations surface as extension ids,
    /// and `remoteEnv` is applied to every exec. No UID-update pass is
    /// asserted here — only the feature build artifacts and exec env.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: Dockerfile build with args, a custom
        // workspace mount/folder, the full set of lifecycle commands, two OCI
        // features, and per-editor customizations. `updateRemoteUserUID` is
        // explicitly disabled, which is the knob under test.
        let given_devcontainer_contents = r#"
        /*---------------------------------------------------------------------------------------------
         * Copyright (c) Microsoft Corporation. All rights reserved.
         * Licensed under the MIT License. See License.txt in the project root for license information.
         *--------------------------------------------------------------------------------------------*/
        {
            "name": "cli-${devcontainerId}",
            // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VARIANT": "18-bookworm",
                    "FOO": "bar",
                },
            },
            "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
            "workspaceFolder": "/workspace2",
            "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
            ],

            "forwardPorts": [
                8082,
                8083,
            ],
            "appPort": "8084",
            "updateRemoteUserUID": false,

            "containerEnv": {
                "VARIABLE_VALUE": "value",
            },

            "initializeCommand": "touch IAM.md",

            "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

            "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

            "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
            },

            "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

            "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

            "remoteUser": "node",

            "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
            },

            "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                    "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
            },

            "customizations": {
                "vscode": {
                    "extensions": [
                        "dbaeumer.vscode-eslint",
                        "GitHub.vscode-pull-request-github",
                    ],
                },
                "zed": {
                    "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                    "repositories": {
                        "devcontainers/features": {
                            "permissions": {
                                "contents": "write",
                                "workflows": "write",
                            },
                        },
                    },
                },
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by the devcontainer.json `build`
        // section into the fake filesystem.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions should be surfaced.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The feature build must have produced a Dockerfile.extended that
        // wraps the user Dockerfile and appends one install stage per feature.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Exact expected output: BuildKit bind-mount style feature installs
        // (this path uses --mount rather than COPY), feature order
        // docker-in-docker then go, and trailing containerEnv/feature ENVs.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // The per-feature install wrapper script for the `go` feature must
        // report the feature id/options and source the env files before
        // delegating to the feature's install.sh.
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
[ $? -eq 0 ] && exit
echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo ' GOLANGCILINTVERSION=latest
VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        // A `docker run` must have been issued to start the container.
        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // remoteEnv must be applied to every exec: ${containerEnv:PATH} is
        // expanded from the container's reported PATH ("/initial/path", see
        // FakeDocker::inspect) plus the configured suffix; OTHER_ENV passes
        // through verbatim.
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4298
    /// One call to `FakeDocker::run_docker_exec`, captured for later
    /// assertions. Fields prefixed with `_` are recorded but not currently
    /// read by any test.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment the exec was asked to run with; tests assert on this to
        // verify remoteEnv handling.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4306
    /// In-memory `DockerClient` double: serves canned inspect/compose data
    /// and logs exec calls instead of talking to a real daemon.
    pub(crate) struct FakeDocker {
        // Every exec request made through `run_docker_exec`, in call order.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as podman (different CLI name,
        // no compose BuildKit support).
        podman: bool,
    }
4311
4312 impl FakeDocker {
4313 pub(crate) fn new() -> Self {
4314 Self {
4315 podman: false,
4316 exec_commands_recorded: Mutex::new(Vec::new()),
4317 }
4318 }
4319 #[cfg(not(target_os = "windows"))]
4320 fn set_podman(&mut self, podman: bool) {
4321 self.podman = podman;
4322 }
4323 }
4324
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Serves canned inspect data keyed on the image/container id; any id
        /// without a fixture yields `DockerNotAvailable` so tests fail loudly
        /// on unexpected lookups.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Base image for the typescript-node Dockerfile tests.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Base image for the rust/docker-compose tests.
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Built images/containers whose name derives from the
            // `"name": "cli-..."` devcontainer. PATH here is what remoteEnv
            // `${containerEnv:PATH}` expansion is asserted against.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id returned by `find_process_by_filters`; the
            // only fixture that reports mounts.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Images/containers named from a rust devcontainer fixture.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }

            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns a canned app+db compose config, but only for the exact
        /// compose file path the docker-compose tests write; anything else is
        /// an error so path handling bugs surface.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: "../..".to_string(),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: "postgres-data".to_string(),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Compose builds always "succeed" — the fake performs no work.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec request (including its env, which tests assert
        /// on) instead of running anything.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Starting an existing container is unsupported by the fake.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Always reports one running container, id "found_docker_ps"
        /// (matching the `inspect` fixture of the same name).
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        // Podman mode disables compose BuildKit, mirroring the real client.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        // CLI name follows the configured engine.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
4527
    /// A single recorded CLI invocation: the program name plus its arguments,
    /// both stringified for easy assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
4533
    /// `CommandRunner` double that records every command instead of spawning
    /// processes; always reports success with empty output.
    pub(crate) struct TestCommandRunner {
        // Commands in the order they were "run".
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4537
4538 impl TestCommandRunner {
4539 fn new() -> Self {
4540 Self {
4541 commands_recorded: Mutex::new(Vec::new()),
4542 }
4543 }
4544
4545 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4546 let record = self.commands_recorded.lock().expect("poisoned");
4547 record
4548 .iter()
4549 .filter(|r| r.program == program)
4550 .map(|r| r.clone())
4551 .collect()
4552 }
4553 }
4554
4555 #[async_trait]
4556 impl CommandRunner for TestCommandRunner {
4557 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4558 let mut record = self.commands_recorded.lock().expect("poisoned");
4559
4560 record.push(TestCommand {
4561 program: command.get_program().display().to_string(),
4562 args: command
4563 .get_args()
4564 .map(|a| a.display().to_string())
4565 .collect(),
4566 });
4567
4568 Ok(Output {
4569 status: ExitStatus::default(),
4570 stdout: vec![],
4571 stderr: vec![],
4572 })
4573 }
4574 }
4575
4576 fn fake_http_client() -> Arc<dyn HttpClient> {
4577 FakeHttpClient::create(|request| async move {
4578 let (parts, _body) = request.into_parts();
4579 if parts.uri.path() == "/token" {
4580 let token_response = TokenResponse {
4581 token: "token".to_string(),
4582 };
4583 return Ok(http::Response::builder()
4584 .status(200)
4585 .body(http_client::AsyncBody::from(
4586 serde_json_lenient::to_string(&token_response).unwrap(),
4587 ))
4588 .unwrap());
4589 }
4590
4591 // OCI specific things
4592 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4593 let response = r#"
4594 {
4595 "schemaVersion": 2,
4596 "mediaType": "application/vnd.oci.image.manifest.v1+json",
4597 "config": {
4598 "mediaType": "application/vnd.devcontainers",
4599 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4600 "size": 2
4601 },
4602 "layers": [
4603 {
4604 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4605 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4606 "size": 59392,
4607 "annotations": {
4608 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4609 }
4610 }
4611 ],
4612 "annotations": {
4613 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4614 "com.github.package.type": "devcontainer_feature"
4615 }
4616 }
4617 "#;
4618 return Ok(http::Response::builder()
4619 .status(200)
4620 .body(http_client::AsyncBody::from(response))
4621 .unwrap());
4622 }
4623
4624 if parts.uri.path()
4625 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4626 {
4627 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4632 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4633 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4634 ```
4635 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4636 ```
4637 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4638
4639
4640 ## OS Support
4641
4642 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4643
4644 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4645
4646 `bash` is required to execute the `install.sh` script."#),
4647 ("./README.md", r#"
4648 # Docker (Docker-in-Docker) (docker-in-docker)
4649
4650 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4651
4652 ## Example Usage
4653
4654 ```json
4655 "features": {
4656 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4657 }
4658 ```
4659
4660 ## Options
4661
4662 | Options Id | Description | Type | Default Value |
4663 |-----|-----|-----|-----|
4664 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4665 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4666 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4667 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4668 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4669 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4670 | installDockerBuildx | Install Docker Buildx | boolean | true |
4671 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4672 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4673
4674 ## Customizations
4675
4676 ### VS Code Extensions
4677
4678 - `ms-azuretools.vscode-containers`
4679
4680 ## Limitations
4681
4682 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4683 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4684 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4685 ```
4686 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4687 ```
4688 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4689
4690
4691 ## OS Support
4692
4693 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4694
4695 `bash` is required to execute the `install.sh` script.
4696
4697
4698 ---
4699
4700 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
4701 ("./devcontainer-feature.json", r#"
4702 {
4703 "id": "docker-in-docker",
4704 "version": "2.16.1",
4705 "name": "Docker (Docker-in-Docker)",
4706 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4707 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4708 "options": {
4709 "version": {
4710 "type": "string",
4711 "proposals": [
4712 "latest",
4713 "none",
4714 "20.10"
4715 ],
4716 "default": "latest",
4717 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4718 },
4719 "moby": {
4720 "type": "boolean",
4721 "default": true,
4722 "description": "Install OSS Moby build instead of Docker CE"
4723 },
4724 "mobyBuildxVersion": {
4725 "type": "string",
4726 "default": "latest",
4727 "description": "Install a specific version of moby-buildx when using Moby"
4728 },
4729 "dockerDashComposeVersion": {
4730 "type": "string",
4731 "enum": [
4732 "none",
4733 "v1",
4734 "v2"
4735 ],
4736 "default": "v2",
4737 "description": "Default version of Docker Compose (v1, v2 or none)"
4738 },
4739 "azureDnsAutoDetection": {
4740 "type": "boolean",
4741 "default": true,
4742 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4743 },
4744 "dockerDefaultAddressPool": {
4745 "type": "string",
4746 "default": "",
4747 "proposals": [],
4748 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4749 },
4750 "installDockerBuildx": {
4751 "type": "boolean",
4752 "default": true,
4753 "description": "Install Docker Buildx"
4754 },
4755 "installDockerComposeSwitch": {
4756 "type": "boolean",
4757 "default": false,
4758 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4759 },
4760 "disableIp6tables": {
4761 "type": "boolean",
4762 "default": false,
4763 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4764 }
4765 },
4766 "entrypoint": "/usr/local/share/docker-init.sh",
4767 "privileged": true,
4768 "containerEnv": {
4769 "DOCKER_BUILDKIT": "1"
4770 },
4771 "customizations": {
4772 "vscode": {
4773 "extensions": [
4774 "ms-azuretools.vscode-containers"
4775 ],
4776 "settings": {
4777 "github.copilot.chat.codeGeneration.instructions": [
4778 {
4779 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
4780 }
4781 ]
4782 }
4783 }
4784 },
4785 "mounts": [
4786 {
4787 "source": "dind-var-lib-docker-${devcontainerId}",
4788 "target": "/var/lib/docker",
4789 "type": "volume"
4790 }
4791 ],
4792 "installsAfter": [
4793 "ghcr.io/devcontainers/features/common-utils"
4794 ]
4795 }"#),
4796 ("./install.sh", r#"
4797 #!/usr/bin/env bash
4798 #-------------------------------------------------------------------------------------------------------------
4799 # Copyright (c) Microsoft Corporation. All rights reserved.
4800 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
4801 #-------------------------------------------------------------------------------------------------------------
4802 #
4803 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
4804 # Maintainer: The Dev Container spec maintainers
4805
4806
4807 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
4808 USE_MOBY="${MOBY:-"true"}"
4809 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
4810 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
4811 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
4812 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
4813 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
4814 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
4815 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
4816 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
4817 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
4818 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
4819 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
4820 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
4821
4822 # Default: Exit on any failure.
4823 set -e
4824
4825 # Clean up
4826 rm -rf /var/lib/apt/lists/*
4827
4828 # Setup STDERR.
4829 err() {
4830 echo "(!) $*" >&2
4831 }
4832
4833 if [ "$(id -u)" -ne 0 ]; then
4834 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
4835 exit 1
4836 fi
4837
4838 ###################
4839 # Helper Functions
4840 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
4841 ###################
4842
4843 # Determine the appropriate non-root user
4844 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
4845 USERNAME=""
4846 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
4847 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
4848 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
4849 USERNAME=${CURRENT_USER}
4850 break
4851 fi
4852 done
4853 if [ "${USERNAME}" = "" ]; then
4854 USERNAME=root
4855 fi
4856 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
4857 USERNAME=root
4858 fi
4859
4860 # Package manager update function
4861 pkg_mgr_update() {
4862 case ${ADJUSTED_ID} in
4863 debian)
4864 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
4865 echo "Running apt-get update..."
4866 apt-get update -y
4867 fi
4868 ;;
4869 rhel)
4870 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
4871 cache_check_dir="/var/cache/yum"
4872 else
4873 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
4874 fi
4875 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
4876 echo "Running ${PKG_MGR_CMD} makecache ..."
4877 ${PKG_MGR_CMD} makecache
4878 fi
4879 ;;
4880 esac
4881 }
4882
4883 # Checks if packages are installed and installs them if not
4884 check_packages() {
4885 case ${ADJUSTED_ID} in
4886 debian)
4887 if ! dpkg -s "$@" > /dev/null 2>&1; then
4888 pkg_mgr_update
4889 apt-get -y install --no-install-recommends "$@"
4890 fi
4891 ;;
4892 rhel)
4893 if ! rpm -q "$@" > /dev/null 2>&1; then
4894 pkg_mgr_update
4895 ${PKG_MGR_CMD} -y install "$@"
4896 fi
4897 ;;
4898 esac
4899 }
4900
4901 # Figure out correct version of a three part version number is not passed
4902 find_version_from_git_tags() {
4903 local variable_name=$1
4904 local requested_version=${!variable_name}
4905 if [ "${requested_version}" = "none" ]; then return; fi
4906 local repository=$2
4907 local prefix=${3:-"tags/v"}
4908 local separator=${4:-"."}
4909 local last_part_optional=${5:-"false"}
4910 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
4911 local escaped_separator=${separator//./\\.}
4912 local last_part
4913 if [ "${last_part_optional}" = "true" ]; then
4914 last_part="(${escaped_separator}[0-9]+)?"
4915 else
4916 last_part="${escaped_separator}[0-9]+"
4917 fi
4918 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
4919 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
4920 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
4921 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
4922 else
4923 set +e
4924 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
4925 set -e
4926 fi
4927 fi
4928 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
4929 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
4930 exit 1
4931 fi
4932 echo "${variable_name}=${!variable_name}"
4933 }
4934
4935 # Use semver logic to decrement a version number then look for the closest match
4936 find_prev_version_from_git_tags() {
4937 local variable_name=$1
4938 local current_version=${!variable_name}
4939 local repository=$2
4940 # Normally a "v" is used before the version number, but support alternate cases
4941 local prefix=${3:-"tags/v"}
4942 # Some repositories use "_" instead of "." for version number part separation, support that
4943 local separator=${4:-"."}
4944 # Some tools release versions that omit the last digit (e.g. go)
4945 local last_part_optional=${5:-"false"}
4946 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
4947 local version_suffix_regex=$6
4948 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
4949 set +e
4950 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
4951 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
4952 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
4953
4954 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
4955 ((major=major-1))
4956 declare -g ${variable_name}="${major}"
4957 # Look for latest version from previous major release
4958 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4959 # Handle situations like Go's odd version pattern where "0" releases omit the last part
4960 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
4961 ((minor=minor-1))
4962 declare -g ${variable_name}="${major}.${minor}"
4963 # Look for latest version from previous minor release
4964 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4965 else
4966 ((breakfix=breakfix-1))
4967 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
4968 declare -g ${variable_name}="${major}.${minor}"
4969 else
4970 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
4971 fi
4972 fi
4973 set -e
4974 }
4975
4976 # Function to fetch the version released prior to the latest version
4977 get_previous_version() {
4978 local url=$1
4979 local repo_url=$2
4980 local variable_name=$3
4981 prev_version=${!variable_name}
4982
4983 output=$(curl -s "$repo_url");
4984 if echo "$output" | jq -e 'type == "object"' > /dev/null; then
4985 message=$(echo "$output" | jq -r '.message')
4986
4987 if [[ $message == "API rate limit exceeded"* ]]; then
4988 echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
4989 echo -e "\nAttempting to find latest version using GitHub tags."
4990 find_prev_version_from_git_tags prev_version "$url" "tags/v"
4991 declare -g ${variable_name}="${prev_version}"
4992 fi
4993 elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
4994 echo -e "\nAttempting to find latest version using GitHub Api."
4995 version=$(echo "$output" | jq -r '.[1].tag_name')
4996 declare -g ${variable_name}="${version#v}"
4997 fi
4998 echo "${variable_name}=${!variable_name}"
4999 }
5000
5001 get_github_api_repo_url() {
5002 local url=$1
5003 echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5004 }
5005
5006 ###########################################
5007 # Start docker-in-docker installation
5008 ###########################################
5009
5010 # Ensure apt is in non-interactive to avoid prompts
5011 export DEBIAN_FRONTEND=noninteractive
5012
5013 # Source /etc/os-release to get OS info
5014 . /etc/os-release
5015
5016 # Determine adjusted ID and package manager
5017 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5018 ADJUSTED_ID="debian"
5019 PKG_MGR_CMD="apt-get"
5020 # Use dpkg for Debian-based systems
5021 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5022 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5023 ADJUSTED_ID="rhel"
5024 # Determine the appropriate package manager for RHEL-based systems
5025 for pkg_mgr in tdnf dnf microdnf yum; do
5026 if command -v "$pkg_mgr" >/dev/null 2>&1; then
5027 PKG_MGR_CMD="$pkg_mgr"
5028 break
5029 fi
5030 done
5031
5032 if [ -z "${PKG_MGR_CMD}" ]; then
5033 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5034 exit 1
5035 fi
5036
5037 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5038 else
5039 err "Linux distro ${ID} not supported."
5040 exit 1
5041 fi
5042
5043 # Azure Linux specific setup
5044 if [ "${ID}" = "azurelinux" ]; then
5045 VERSION_CODENAME="azurelinux${VERSION_ID}"
5046 fi
5047
5048 # Prevent attempting to install Moby on Debian trixie (packages removed)
5049 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5050 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5051 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5052 exit 1
5053 fi
5054
5055 # Check if distro is supported
5056 if [ "${USE_MOBY}" = "true" ]; then
5057 if [ "${ADJUSTED_ID}" = "debian" ]; then
5058 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5059 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5060 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5061 exit 1
5062 fi
5063 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5064 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5065 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5066 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5067 else
5068 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5069 fi
5070 fi
5071 else
5072 if [ "${ADJUSTED_ID}" = "debian" ]; then
5073 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5074 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5075 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5076 exit 1
5077 fi
5078 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5079 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5080
5081 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5082 fi
5083 fi
5084
5085 # Install base dependencies
5086 base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5087 case ${ADJUSTED_ID} in
5088 debian)
5089 check_packages apt-transport-https $base_packages dirmngr
5090 ;;
5091 rhel)
5092 check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
5093
5094 ;;
5095 esac
5096
5097 # Install git if not already present
5098 if ! command -v git >/dev/null 2>&1; then
5099 check_packages git
5100 fi
5101
5102 # Update CA certificates to ensure HTTPS connections work properly
5103 # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5104 # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5105 if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5106 update-ca-certificates
5107 fi
5108
5109 # Swap to legacy iptables for compatibility (Debian only)
5110 if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5111 update-alternatives --set iptables /usr/sbin/iptables-legacy
5112 update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5113 fi
5114
5115 # Set up the necessary repositories
5116 if [ "${USE_MOBY}" = "true" ]; then
5117 # Name of open source engine/cli
5118 engine_package_name="moby-engine"
5119 cli_package_name="moby-cli"
5120
5121 case ${ADJUSTED_ID} in
5122 debian)
5123 # Import key safely and import Microsoft apt repo
5124 {
5125 curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5126 curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5127 } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5128 echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5129 ;;
5130 rhel)
5131 echo "(*) ${ID} detected - checking for Moby packages..."
5132
5133 # Check if moby packages are available in default repos
5134 if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5135 echo "(*) Using built-in ${ID} Moby packages"
5136 else
5137 case "${ID}" in
5138 azurelinux)
5139 echo "(*) Moby packages not found in Azure Linux repositories"
5140 echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5141 err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5142 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5143 exit 1
5144 ;;
5145 mariner)
5146 echo "(*) Adding Microsoft repository for CBL-Mariner..."
5147 # Add Microsoft repository if packages aren't available locally
5148 curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5149 cat > /etc/yum.repos.d/microsoft.repo << EOF
5150 [microsoft]
5151 name=Microsoft Repository
5152 baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5153 enabled=1
5154 gpgcheck=1
5155 gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5156 EOF
5157 # Verify packages are available after adding repo
5158 pkg_mgr_update
5159 if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5160 echo "(*) Moby packages not found in Microsoft repository either"
5161 err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5162 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5163 exit 1
5164 fi
5165 ;;
5166 *)
5167 err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5168 exit 1
5169 ;;
5170 esac
5171 fi
5172 ;;
5173 esac
5174 else
5175 # Name of licensed engine/cli
5176 engine_package_name="docker-ce"
5177 cli_package_name="docker-ce-cli"
5178 case ${ADJUSTED_ID} in
5179 debian)
5180 curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5181 echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5182 ;;
5183 rhel)
5184 # Docker CE repository setup for RHEL-based systems
5185 setup_docker_ce_repo() {
5186 curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5187 cat > /etc/yum.repos.d/docker-ce.repo << EOF
5188 [docker-ce-stable]
5189 name=Docker CE Stable
5190 baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5191 enabled=1
5192 gpgcheck=1
5193 gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5194 skip_if_unavailable=1
5195 module_hotfixes=1
5196 EOF
5197 }
5198 install_azure_linux_deps() {
5199 echo "(*) Installing device-mapper libraries for Docker CE..."
5200 [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5201 echo "(*) Installing additional Docker CE dependencies..."
5202 ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5203 echo "(*) Some optional dependencies could not be installed, continuing..."
5204 }
5205 }
5206 setup_selinux_context() {
5207 if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5208 echo "(*) Creating minimal SELinux context for Docker compatibility..."
5209 mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5210 echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5211 fi
5212 }
5213
5214 # Special handling for RHEL Docker CE installation
5215 case "${ID}" in
5216 azurelinux|mariner)
5217 echo "(*) ${ID} detected"
5218 echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5219 echo "(*) Setting up Docker CE repository..."
5220
5221 setup_docker_ce_repo
5222 install_azure_linux_deps
5223
5224 if [ "${USE_MOBY}" != "true" ]; then
5225 echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5226 echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5227 setup_selinux_context
5228 else
5229 echo "(*) Using Moby - container-selinux not required"
5230 fi
5231 ;;
5232 *)
5233 # Standard RHEL/CentOS/Fedora approach
5234 if command -v dnf >/dev/null 2>&1; then
5235 dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5236 elif command -v yum-config-manager >/dev/null 2>&1; then
5237 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5238 else
5239 # Manual fallback
5240 setup_docker_ce_repo
5241 fi
5242 ;;
5243 esac
5244 ;;
5245 esac
5246 fi
5247
5248 # Refresh package database
5249 case ${ADJUSTED_ID} in
5250 debian)
5251 apt-get update
5252 ;;
5253 rhel)
5254 pkg_mgr_update
5255 ;;
5256 esac
5257
5258 # Soft version matching
5259 if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
5260 # Empty, meaning grab whatever "latest" is in apt repo
5261 engine_version_suffix=""
5262 cli_version_suffix=""
5263 else
5264 case ${ADJUSTED_ID} in
5265 debian)
5266 # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
5267 docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
5268 docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
5269 # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
5270 docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5271 set +e # Don't exit if finding version fails - will handle gracefully
5272 cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5273 engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5274 set -e
5275 if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
5276 err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5277 apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5278 exit 1
5279 fi
5280 ;;
5281 rhel)
5282 # For RHEL-based systems, use dnf/yum to find versions
5283 docker_version_escaped="${DOCKER_VERSION//./\\.}"
5284 set +e # Don't exit if finding version fails - will handle gracefully
5285 if [ "${USE_MOBY}" = "true" ]; then
5286 available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5287 else
5288 available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5289 fi
5290 set -e
5291 if [ -n "${available_versions}" ]; then
5292 engine_version_suffix="-${available_versions}"
5293 cli_version_suffix="-${available_versions}"
5294 else
5295 echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
5296 engine_version_suffix=""
5297 cli_version_suffix=""
5298 fi
5299 ;;
5300 esac
5301 fi
5302
5303 # Version matching for moby-buildx
5304 if [ "${USE_MOBY}" = "true" ]; then
5305 if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
5306 # Empty, meaning grab whatever "latest" is in apt repo
5307 buildx_version_suffix=""
5308 else
5309 case ${ADJUSTED_ID} in
5310 debian)
5311 buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5312 buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
5313 buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5314 set +e
5315 buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
5316 set -e
5317 if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
5318 err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5319 apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5320 exit 1
5321 fi
5322 ;;
5323 rhel)
5324 # For RHEL-based systems, try to find buildx version or use latest
5325 buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5326 set +e
5327 available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
5328 set -e
5329 if [ -n "${available_buildx}" ]; then
5330 buildx_version_suffix="-${available_buildx}"
5331 else
5332 echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
5333 buildx_version_suffix=""
5334 fi
5335 ;;
5336 esac
5337 echo "buildx_version_suffix ${buildx_version_suffix}"
5338 fi
5339 fi
5340
5341 # Install Docker / Moby CLI if not already installed
5342 if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5343 echo "Docker / Moby CLI and Engine already installed."
5344 else
5345 case ${ADJUSTED_ID} in
5346 debian)
5347 if [ "${USE_MOBY}" = "true" ]; then
5348 # Install engine
5349 set +e # Handle error gracefully
5350 apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5351 exit_code=$?
5352 set -e
5353
5354 if [ ${exit_code} -ne 0 ]; then
5355 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5356 exit 1
5357 fi
5358
5359 # Install compose
5360 apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5361 else
5362 apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5363 # Install compose
5364 apt-mark hold docker-ce docker-ce-cli
5365 apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5366 fi
5367 ;;
5368 rhel)
5369 if [ "${USE_MOBY}" = "true" ]; then
5370 set +e # Handle error gracefully
5371 ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5372 exit_code=$?
5373 set -e
5374
5375 if [ ${exit_code} -ne 0 ]; then
5376 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5377 exit 1
5378 fi
5379
5380 # Install compose
5381 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5382 ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5383 fi
5384 else
5385 # Special handling for Azure Linux Docker CE installation
5386 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5387 echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5388
5389 # Use rpm with --force and --nodeps for Azure Linux
5390 set +e # Don't exit on error for this section
5391 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5392 install_result=$?
5393 set -e
5394
5395 if [ $install_result -ne 0 ]; then
5396 echo "(*) Standard installation failed, trying manual installation..."
5397
5398 echo "(*) Standard installation failed, trying manual installation..."
5399
5400 # Create directory for downloading packages
5401 mkdir -p /tmp/docker-ce-install
5402
5403 # Download packages manually using curl since tdnf doesn't support download
5404 echo "(*) Downloading Docker CE packages manually..."
5405
5406 # Get the repository baseurl
5407 repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5408
5409 # Download packages directly
5410 cd /tmp/docker-ce-install
5411
5412 # Get package names with versions
5413 if [ -n "${cli_version_suffix}" ]; then
5414 docker_ce_version="${cli_version_suffix#-}"
5415 docker_cli_version="${engine_version_suffix#-}"
5416 else
5417 # Get latest version from repository
5418 docker_ce_version="latest"
5419 fi
5420
5421 echo "(*) Attempting to download Docker CE packages from repository..."
5422
5423 # Try to download latest packages if specific version fails
5424 if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5425 # Fallback: try to get latest available version
5426 echo "(*) Specific version not found, trying latest..."
5427 latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5428 latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5429 latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5430
5431 if [ -n "${latest_docker}" ]; then
5432 curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5433 curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5434 curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5435 else
5436 echo "(*) ERROR: Could not find Docker CE packages in repository"
5437 echo "(*) Please check repository configuration or use 'moby': true"
5438 exit 1
5439 fi
5440 fi
5441 # Install systemd libraries required by Docker CE
5442 echo "(*) Installing systemd libraries required by Docker CE..."
5443 ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5444 echo "(*) WARNING: Could not install systemd libraries"
5445 echo "(*) Docker may fail to start without these"
5446 }
5447
5448 # Install with rpm --force --nodeps
5449 echo "(*) Installing Docker CE packages with dependency override..."
5450 rpm -Uvh --force --nodeps *.rpm
5451
5452 # Cleanup
5453 cd /
5454 rm -rf /tmp/docker-ce-install
5455
5456 echo "(*) Docker CE installation completed with dependency bypass"
5457 echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5458 fi
5459 else
5460 # Standard installation for other RHEL-based systems
5461 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5462 fi
5463 # Install compose
5464 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5465 ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5466 fi
5467 fi
5468 ;;
5469 esac
5470 fi
5471
5472 echo "Finished installing docker / moby!"
5473
5474 docker_home="/usr/libexec/docker"
5475 cli_plugins_dir="${docker_home}/cli-plugins"
5476
5477 # fallback for docker-compose
5478 fallback_compose(){
5479 local url=$1
5480 local repo_url=$(get_github_api_repo_url "$url")
5481 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5482 get_previous_version "${url}" "${repo_url}" compose_version
5483 echo -e "\nAttempting to install v${compose_version}"
5484 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5485 }
5486
5487 # If 'docker-compose' command is to be included
5488 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5489 case "${architecture}" in
5490 amd64|x86_64) target_compose_arch=x86_64 ;;
5491 arm64|aarch64) target_compose_arch=aarch64 ;;
5492 *)
5493 echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5494 exit 1
5495 esac
5496
5497 docker_compose_path="/usr/local/bin/docker-compose"
5498 if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5499 err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5500 INSTALL_DOCKER_COMPOSE_SWITCH="false"
5501
5502 if [ "${target_compose_arch}" = "x86_64" ]; then
5503 echo "(*) Installing docker compose v1..."
5504 curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5505 chmod +x ${docker_compose_path}
5506
5507 # Download the SHA256 checksum
5508 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5509 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5510 sha256sum -c docker-compose.sha256sum --ignore-missing
5511 elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5512 err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5513 exit 1
5514 else
5515 # Use pip to get a version that runs on this architecture
5516 check_packages python3-minimal python3-pip libffi-dev python3-venv
5517 echo "(*) Installing docker compose v1 via pip..."
5518 export PYTHONUSERBASE=/usr/local
5519 pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5520 fi
5521 else
5522 compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5523 docker_compose_url="https://github.com/docker/compose"
5524 find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5525 echo "(*) Installing docker-compose ${compose_version}..."
5526 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5527 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5528 fallback_compose "$docker_compose_url"
5529 }
5530
5531 chmod +x ${docker_compose_path}
5532
5533 # Download the SHA256 checksum
5534 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5535 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5536 sha256sum -c docker-compose.sha256sum --ignore-missing
5537
5538 mkdir -p ${cli_plugins_dir}
5539 cp ${docker_compose_path} ${cli_plugins_dir}
5540 fi
5541 fi
5542
5543 # fallback method for compose-switch
5544 fallback_compose-switch() {
5545 local url=$1
5546 local repo_url=$(get_github_api_repo_url "$url")
5547 echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5548 get_previous_version "$url" "$repo_url" compose_switch_version
5549 echo -e "\nAttempting to install v${compose_switch_version}"
5550 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5551 }
5552 # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5553 if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5554 if type docker-compose > /dev/null 2>&1; then
5555 echo "(*) Installing compose-switch..."
5556 current_compose_path="$(command -v docker-compose)"
5557 target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5558 compose_switch_version="latest"
5559 compose_switch_url="https://github.com/docker/compose-switch"
5560 # Try to get latest version, fallback to known stable version if GitHub API fails
5561 set +e
5562 find_version_from_git_tags compose_switch_version "$compose_switch_url"
5563 if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5564 echo "(*) GitHub API rate limited or failed, using fallback method"
5565 fallback_compose-switch "$compose_switch_url"
5566 fi
5567 set -e
5568
5569 # Map architecture for compose-switch downloads
5570 case "${architecture}" in
5571 amd64|x86_64) target_switch_arch=amd64 ;;
5572 arm64|aarch64) target_switch_arch=arm64 ;;
5573 *) target_switch_arch=${architecture} ;;
5574 esac
5575 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5576 chmod +x /usr/local/bin/compose-switch
5577 # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5578 # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5579 mv "${current_compose_path}" "${target_compose_path}"
5580 update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5581 update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5582 else
5583 err "Skipping installation of compose-switch as docker compose is unavailable..."
5584 fi
5585 fi
5586
5587 # If init file already exists, exit
5588 if [ -f "/usr/local/share/docker-init.sh" ]; then
5589 echo "/usr/local/share/docker-init.sh already exists, so exiting."
5590 # Clean up
5591 rm -rf /var/lib/apt/lists/*
5592 exit 0
5593 fi
5594 echo "docker-init doesn't exist, adding..."
5595
5596 if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5597 groupadd -r docker
5598 fi
5599
5600 usermod -aG docker ${USERNAME}
5601
5602 # fallback for docker/buildx
5603 fallback_buildx() {
5604 local url=$1
5605 local repo_url=$(get_github_api_repo_url "$url")
5606 echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5607 get_previous_version "$url" "$repo_url" buildx_version
5608 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5609 echo -e "\nAttempting to install v${buildx_version}"
5610 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5611 }
5612
5613 if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5614 buildx_version="latest"
5615 docker_buildx_url="https://github.com/docker/buildx"
5616 find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5617 echo "(*) Installing buildx ${buildx_version}..."
5618
5619 # Map architecture for buildx downloads
5620 case "${architecture}" in
5621 amd64|x86_64) target_buildx_arch=amd64 ;;
5622 arm64|aarch64) target_buildx_arch=arm64 ;;
5623 *) target_buildx_arch=${architecture} ;;
5624 esac
5625
5626 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5627
5628 cd /tmp
5629 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5630
5631 docker_home="/usr/libexec/docker"
5632 cli_plugins_dir="${docker_home}/cli-plugins"
5633
5634 mkdir -p ${cli_plugins_dir}
5635 mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5636 chmod +x ${cli_plugins_dir}/docker-buildx
5637
5638 chown -R "${USERNAME}:docker" "${docker_home}"
5639 chmod -R g+r+w "${docker_home}"
5640 find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5641 fi
5642
5643 DOCKER_DEFAULT_IP6_TABLES=""
5644 if [ "$DISABLE_IP6_TABLES" == true ]; then
5645 requested_version=""
5646 # checking whether the version requested either is in semver format or just a number denoting the major version
5647 # and, extracting the major version number out of the two scenarios
5648 semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5649 if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5650 requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5651 elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5652 requested_version=$DOCKER_VERSION
5653 fi
5654 if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5655 DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5656 echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5657 fi
5658 fi
5659
5660 if [ ! -d /usr/local/share ]; then
5661 mkdir -p /usr/local/share
5662 fi
5663
5664 tee /usr/local/share/docker-init.sh > /dev/null \
5665 << EOF
5666 #!/bin/sh
5667 #-------------------------------------------------------------------------------------------------------------
5668 # Copyright (c) Microsoft Corporation. All rights reserved.
5669 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5670 #-------------------------------------------------------------------------------------------------------------
5671
5672 set -e
5673
5674 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5675 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5676 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5677 EOF
5678
5679 tee -a /usr/local/share/docker-init.sh > /dev/null \
5680 << 'EOF'
5681 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5682 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5683 find /run /var/run -iname 'docker*.pid' -delete || :
5684 find /run /var/run -iname 'container*.pid' -delete || :
5685
5686 # -- Start: dind wrapper script --
5687 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5688
5689 export container=docker
5690
5691 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5692 mount -t securityfs none /sys/kernel/security || {
5693 echo >&2 'Could not mount /sys/kernel/security.'
5694 echo >&2 'AppArmor detection and --privileged mode might break.'
5695 }
5696 fi
5697
5698 # Mount /tmp (conditionally)
5699 if ! mountpoint -q /tmp; then
5700 mount -t tmpfs none /tmp
5701 fi
5702
5703 set_cgroup_nesting()
5704 {
5705 # cgroup v2: enable nesting
5706 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5707 # move the processes from the root group to the /init group,
5708 # otherwise writing subtree_control fails with EBUSY.
5709 # An error during moving non-existent process (i.e., "cat") is ignored.
5710 mkdir -p /sys/fs/cgroup/init
5711 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5712 # enable controllers
5713 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5714 > /sys/fs/cgroup/cgroup.subtree_control
5715 fi
5716 }
5717
5718 # Set cgroup nesting, retrying if necessary
5719 retry_cgroup_nesting=0
5720
5721 until [ "${retry_cgroup_nesting}" -eq "5" ];
5722 do
5723 set +e
5724 set_cgroup_nesting
5725
5726 if [ $? -ne 0 ]; then
5727 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5728 else
5729 break
5730 fi
5731
5732 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5733 set -e
5734 done
5735
5736 # -- End: dind wrapper script --
5737
5738 # Handle DNS
5739 set +e
5740 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5741 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5742 then
5743 echo "Setting dockerd Azure DNS."
5744 CUSTOMDNS="--dns 168.63.129.16"
5745 else
5746 echo "Not setting dockerd DNS manually."
5747 CUSTOMDNS=""
5748 fi
5749 set -e
5750
5751 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5752 then
5753 DEFAULT_ADDRESS_POOL=""
5754 else
5755 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5756 fi
5757
5758 # Start docker/moby engine
5759 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5760 INNEREOF
5761 )"
5762
5763 sudo_if() {
5764 COMMAND="$*"
5765
5766 if [ "$(id -u)" -ne 0 ]; then
5767 sudo $COMMAND
5768 else
5769 $COMMAND
5770 fi
5771 }
5772
5773 retry_docker_start_count=0
5774 docker_ok="false"
5775
5776 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
5777 do
5778 # Start using sudo if not invoked as root
5779 if [ "$(id -u)" -ne 0 ]; then
5780 sudo /bin/sh -c "${dockerd_start}"
5781 else
5782 eval "${dockerd_start}"
5783 fi
5784
5785 retry_count=0
5786 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
5787 do
5788 sleep 1s
5789 set +e
5790 docker info > /dev/null 2>&1 && docker_ok="true"
5791 set -e
5792
5793 retry_count=`expr $retry_count + 1`
5794 done
5795
5796 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
5797 echo "(*) Failed to start docker, retrying..."
5798 set +e
5799 sudo_if pkill dockerd
5800 sudo_if pkill containerd
5801 set -e
5802 fi
5803
5804 retry_docker_start_count=`expr $retry_docker_start_count + 1`
5805 done
5806
5807 # Execute whatever commands were passed in (if any). This allows us
5808 # to set this script to ENTRYPOINT while still executing the default CMD.
5809 exec "$@"
5810 EOF
5811
5812 chmod +x /usr/local/share/docker-init.sh
5813 chown ${USERNAME}:root /usr/local/share/docker-init.sh
5814
5815 # Clean up
5816 rm -rf /var/lib/apt/lists/*
5817
5818 echo 'docker-in-docker-debian script has completed!'"#),
5819 ]).await;
5820
5821 return Ok(http::Response::builder()
5822 .status(200)
5823 .body(AsyncBody::from(response))
5824 .unwrap());
5825 }
5826 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
5827 let response = r#"
5828 {
5829 "schemaVersion": 2,
5830 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5831 "config": {
5832 "mediaType": "application/vnd.devcontainers",
5833 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5834 "size": 2
5835 },
5836 "layers": [
5837 {
5838 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5839 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
5840 "size": 20992,
5841 "annotations": {
5842 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
5843 }
5844 }
5845 ],
5846 "annotations": {
5847 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5848 "com.github.package.type": "devcontainer_feature"
5849 }
5850 }
5851 "#;
5852
5853 return Ok(http::Response::builder()
5854 .status(200)
5855 .body(http_client::AsyncBody::from(response))
5856 .unwrap());
5857 }
        // Mock response for the OCI blob download of the `go` feature layer;
        // the sha256 in the URL path matches the layer digest advertised by
        // the manifest mock for `/v2/devcontainers/features/go/manifests/1`.
        if parts.uri.path()
            == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
        {
            // Build an in-memory tarball with the two entries a real
            // devcontainer feature artifact carries: the feature metadata
            // (devcontainer-feature.json) and its install script.
            let response = build_tarball(vec![
            ("./devcontainer-feature.json", r#"
            {
                "id": "go",
                "version": "1.3.3",
                "name": "Go",
                "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
                "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
                "options": {
                    "version": {
                        "type": "string",
                        "proposals": [
                            "latest",
                            "none",
                            "1.24",
                            "1.23"
                        ],
                        "default": "latest",
                        "description": "Select or enter a Go version to install"
                    },
                    "golangciLintVersion": {
                        "type": "string",
                        "default": "latest",
                        "description": "Version of golangci-lint to install"
                    }
                },
                "init": true,
                "customizations": {
                    "vscode": {
                        "extensions": [
                            "golang.Go"
                        ],
                        "settings": {
                            "github.copilot.chat.codeGeneration.instructions": [
                                {
                                    "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                                }
                            ]
                        }
                    }
                },
                "containerEnv": {
                    "GOROOT": "/usr/local/go",
                    "GOPATH": "/go",
                    "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
                },
                "capAdd": [
                    "SYS_PTRACE"
                ],
                "securityOpt": [
                    "seccomp=unconfined"
                ],
                "installsAfter": [
                    "ghcr.io/devcontainers/features/common-utils"
                ]
            }
            "#),
            ("./install.sh", r#"
            #!/usr/bin/env bash
            #-------------------------------------------------------------------------------------------------------------
            # Copyright (c) Microsoft Corporation. All rights reserved.
            # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
            #-------------------------------------------------------------------------------------------------------------
            #
            # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
            # Maintainer: The VS Code and Codespaces Teams

            TARGET_GO_VERSION="${VERSION:-"latest"}"
            GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

            TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
            TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
            USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
            INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

            # https://www.google.com/linuxrepositories/
            GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

            set -e

            if [ "$(id -u)" -ne 0 ]; then
                echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
                exit 1
            fi

            # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
            . /etc/os-release
            # Get an adjusted ID independent of distro variants
            MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
            if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
                ADJUSTED_ID="debian"
            elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
                ADJUSTED_ID="rhel"
                if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
                    VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
                else
                    VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
                fi
            else
                echo "Linux distro ${ID} not supported."
                exit 1
            fi

            if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
                # As of 1 July 2024, mirrorlist.centos.org no longer exists.
                # Update the repo files to reference vault.centos.org.
                sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
                sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
                sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
            fi

            # Setup INSTALL_CMD & PKG_MGR_CMD
            if type apt-get > /dev/null 2>&1; then
                PKG_MGR_CMD=apt-get
                INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
            elif type microdnf > /dev/null 2>&1; then
                PKG_MGR_CMD=microdnf
                INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
            elif type dnf > /dev/null 2>&1; then
                PKG_MGR_CMD=dnf
                INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
            else
                PKG_MGR_CMD=yum
                INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
            fi

            # Clean up
            clean_up() {
                case ${ADJUSTED_ID} in
                    debian)
                        rm -rf /var/lib/apt/lists/*
                        ;;
                    rhel)
                        rm -rf /var/cache/dnf/* /var/cache/yum/*
                        rm -rf /tmp/yum.log
                        rm -rf ${GPG_INSTALL_PATH}
                        ;;
                esac
            }
            clean_up


            # Figure out correct version of a three part version number is not passed
            find_version_from_git_tags() {
                local variable_name=$1
                local requested_version=${!variable_name}
                if [ "${requested_version}" = "none" ]; then return; fi
                local repository=$2
                local prefix=${3:-"tags/v"}
                local separator=${4:-"."}
                local last_part_optional=${5:-"false"}
                if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
                    local escaped_separator=${separator//./\\.}
                    local last_part
                    if [ "${last_part_optional}" = "true" ]; then
                        last_part="(${escaped_separator}[0-9]+)?"
                    else
                        last_part="${escaped_separator}[0-9]+"
                    fi
                    local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
                    local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
                    if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
                        declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
                    else
                        set +e
                        declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
                        set -e
                    fi
                fi
                if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
                    echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
                    exit 1
                fi
                echo "${variable_name}=${!variable_name}"
            }

            pkg_mgr_update() {
                case $ADJUSTED_ID in
                    debian)
                        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                            echo "Running apt-get update..."
                            ${PKG_MGR_CMD} update -y
                        fi
                        ;;
                    rhel)
                        if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                            if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                                echo "Running ${PKG_MGR_CMD} makecache ..."
                                ${PKG_MGR_CMD} makecache
                            fi
                        else
                            if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                                echo "Running ${PKG_MGR_CMD} check-update ..."
                                set +e
                                ${PKG_MGR_CMD} check-update
                                rc=$?
                                if [ $rc != 0 ] && [ $rc != 100 ]; then
                                    exit 1
                                fi
                                set -e
                            fi
                        fi
                        ;;
                esac
            }

            # Checks if packages are installed and installs them if not
            check_packages() {
                case ${ADJUSTED_ID} in
                    debian)
                        if ! dpkg -s "$@" > /dev/null 2>&1; then
                            pkg_mgr_update
                            ${INSTALL_CMD} "$@"
                        fi
                        ;;
                    rhel)
                        if ! rpm -q "$@" > /dev/null 2>&1; then
                            pkg_mgr_update
                            ${INSTALL_CMD} "$@"
                        fi
                        ;;
                esac
            }

            # Ensure that login shells get the correct path if the user updated the PATH using ENV.
            rm -f /etc/profile.d/00-restore-env.sh
            echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
            chmod +x /etc/profile.d/00-restore-env.sh

            # Some distributions do not install awk by default (e.g. Mariner)
            if ! type awk >/dev/null 2>&1; then
                check_packages awk
            fi

            # Determine the appropriate non-root user
            if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
                USERNAME=""
                POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
                for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
                    if id -u ${CURRENT_USER} > /dev/null 2>&1; then
                        USERNAME=${CURRENT_USER}
                        break
                    fi
                done
                if [ "${USERNAME}" = "" ]; then
                    USERNAME=root
                fi
            elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
                USERNAME=root
            fi

            export DEBIAN_FRONTEND=noninteractive

            check_packages ca-certificates gnupg2 tar gcc make pkg-config

            if [ $ADJUSTED_ID = "debian" ]; then
                check_packages g++ libc6-dev
            else
                check_packages gcc-c++ glibc-devel
            fi
            # Install curl, git, other dependencies if missing
            if ! type curl > /dev/null 2>&1; then
                check_packages curl
            fi
            if ! type git > /dev/null 2>&1; then
                check_packages git
            fi
            # Some systems, e.g. Mariner, still a few more packages
            if ! type as > /dev/null 2>&1; then
                check_packages binutils
            fi
            if ! [ -f /usr/include/linux/errno.h ]; then
                check_packages kernel-headers
            fi
            # Minimal RHEL install may need findutils installed
            if ! [ -f /usr/bin/find ]; then
                check_packages findutils
            fi

            # Get closest match for version number specified
            find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

            architecture="$(uname -m)"
            case $architecture in
                x86_64) architecture="amd64";;
                aarch64 | armv8*) architecture="arm64";;
                aarch32 | armv7* | armvhf*) architecture="armv6l";;
                i?86) architecture="386";;
                *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
            esac

            # Install Go
            umask 0002
            if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
                groupadd -r golang
            fi
            usermod -a -G golang "${USERNAME}"
            mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

            if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
                # Use a temporary location for gpg keys to avoid polluting image
                export GNUPGHOME="/tmp/tmp-gnupg"
                mkdir -p ${GNUPGHOME}
                chmod 700 ${GNUPGHOME}
                curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
                gpg -q --import /tmp/tmp-gnupg/golang_key
                echo "Downloading Go ${TARGET_GO_VERSION}..."
                set +e
                curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                exit_code=$?
                set -e
                if [ "$exit_code" != "0" ]; then
                    echo "(!) Download failed."
                    # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
                    set +e
                    major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
                    minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
                    breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
                    # Handle Go's odd version pattern where "0" releases omit the last part
                    if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
                        ((minor=minor-1))
                        TARGET_GO_VERSION="${major}.${minor}"
                        # Look for latest version from previous minor release
                        find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
                    else
                        ((breakfix=breakfix-1))
                        if [ "${breakfix}" = "0" ]; then
                            TARGET_GO_VERSION="${major}.${minor}"
                        else
                            TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
                        fi
                    fi
                    set -e
                    echo "Trying ${TARGET_GO_VERSION}..."
                    curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
                fi
                curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
                gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
                echo "Extracting Go ${TARGET_GO_VERSION}..."
                tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
                rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
            else
                echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
            fi

            # Install Go tools that are isImportant && !replacedByGopls based on
            # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
            GO_TOOLS="\
                golang.org/x/tools/gopls@latest \
                honnef.co/go/tools/cmd/staticcheck@latest \
                golang.org/x/lint/golint@latest \
                github.com/mgechev/revive@latest \
                github.com/go-delve/delve/cmd/dlv@latest \
                github.com/fatih/gomodifytags@latest \
                github.com/haya14busa/goplay/cmd/goplay@latest \
                github.com/cweill/gotests/gotests@latest \
                github.com/josharian/impl@latest"

            if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
                echo "Installing common Go tools..."
                export PATH=${TARGET_GOROOT}/bin:${PATH}
                export GOPATH=/tmp/gotools
                export GOCACHE="${GOPATH}/cache"

                mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
                cd "${GOPATH}"

                # Use go get for versions of go under 1.16
                go_install_command=install
                if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
                    export GO111MODULE=on
                    go_install_command=get
                    echo "Go version < 1.16, using go get."
                fi

                (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

                # Move Go tools into path
                if [ -d "${GOPATH}/bin" ]; then
                    mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
                fi

                # Install golangci-lint from precompiled binaries
                if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
                    echo "Installing golangci-lint latest..."
                    curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                        sh -s -- -b "${TARGET_GOPATH}/bin"
                else
                    echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
                    curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
                        sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
                fi

                # Remove Go tools temp directory
                rm -rf "${GOPATH}"
            fi


            chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
            chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
            find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
            find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

            # Clean up
            clean_up

            echo "Done!"
            "#),
            ])
            .await;
            // Serve the assembled tarball bytes as a successful blob response.
            return Ok(http::Response::builder()
                .status(200)
                .body(AsyncBody::from(response))
                .unwrap())
        }
6276 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6277 let response = r#"
6278 {
6279 "schemaVersion": 2,
6280 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6281 "config": {
6282 "mediaType": "application/vnd.devcontainers",
6283 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6284 "size": 2
6285 },
6286 "layers": [
6287 {
6288 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6289 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6290 "size": 19968,
6291 "annotations": {
6292 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6293 }
6294 }
6295 ],
6296 "annotations": {
6297 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6298 "com.github.package.type": "devcontainer_feature"
6299 }
6300 }"#;
6301 return Ok(http::Response::builder()
6302 .status(200)
6303 .body(AsyncBody::from(response))
6304 .unwrap());
6305 }
6306 if parts.uri.path()
6307 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6308 {
6309 let response = build_tarball(vec![
6310 (
6311 "./devcontainer-feature.json",
6312 r#"
6313{
6314 "id": "aws-cli",
6315 "version": "1.1.3",
6316 "name": "AWS CLI",
6317 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6318 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6319 "options": {
6320 "version": {
6321 "type": "string",
6322 "proposals": [
6323 "latest"
6324 ],
6325 "default": "latest",
6326 "description": "Select or enter an AWS CLI version."
6327 },
6328 "verbose": {
6329 "type": "boolean",
6330 "default": true,
6331 "description": "Suppress verbose output."
6332 }
6333 },
6334 "customizations": {
6335 "vscode": {
6336 "extensions": [
6337 "AmazonWebServices.aws-toolkit-vscode"
6338 ],
6339 "settings": {
6340 "github.copilot.chat.codeGeneration.instructions": [
6341 {
6342 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6343 }
6344 ]
6345 }
6346 }
6347 },
6348 "installsAfter": [
6349 "ghcr.io/devcontainers/features/common-utils"
6350 ]
6351}
6352 "#,
6353 ),
6354 (
6355 "./install.sh",
6356 r#"#!/usr/bin/env bash
6357 #-------------------------------------------------------------------------------------------------------------
6358 # Copyright (c) Microsoft Corporation. All rights reserved.
6359 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6360 #-------------------------------------------------------------------------------------------------------------
6361 #
6362 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6363 # Maintainer: The VS Code and Codespaces Teams
6364
6365 set -e
6366
6367 # Clean up
6368 rm -rf /var/lib/apt/lists/*
6369
6370 VERSION=${VERSION:-"latest"}
6371 VERBOSE=${VERBOSE:-"true"}
6372
6373 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6374 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6375
6376 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6377 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6378 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6379 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6380 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6381 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6382 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6383 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6384 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6385 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6386 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6387 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6388 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6389 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6390 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6391 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6392 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6393 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6394 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6395 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6396 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6397 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6398 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6399 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6400 YLZATHZKTJyiqA==
6401 =vYOk
6402 -----END PGP PUBLIC KEY BLOCK-----"
6403
6404 if [ "$(id -u)" -ne 0 ]; then
6405 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6406 exit 1
6407 fi
6408
6409 apt_get_update()
6410 {
6411 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6412 echo "Running apt-get update..."
6413 apt-get update -y
6414 fi
6415 }
6416
6417 # Checks if packages are installed and installs them if not
6418 check_packages() {
6419 if ! dpkg -s "$@" > /dev/null 2>&1; then
6420 apt_get_update
6421 apt-get -y install --no-install-recommends "$@"
6422 fi
6423 }
6424
6425 export DEBIAN_FRONTEND=noninteractive
6426
6427 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6428
6429 verify_aws_cli_gpg_signature() {
6430 local filePath=$1
6431 local sigFilePath=$2
6432 local awsGpgKeyring=aws-cli-public-key.gpg
6433
6434 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6435 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6436 local status=$?
6437
6438 rm "./${awsGpgKeyring}"
6439
6440 return ${status}
6441 }
6442
6443 install() {
6444 local scriptZipFile=awscli.zip
6445 local scriptSigFile=awscli.sig
6446
6447 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6448 if [ "${VERSION}" != "latest" ]; then
6449 local versionStr=-${VERSION}
6450 fi
6451 architecture=$(dpkg --print-architecture)
6452 case "${architecture}" in
6453 amd64) architectureStr=x86_64 ;;
6454 arm64) architectureStr=aarch64 ;;
6455 *)
6456 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6457 exit 1
6458 esac
6459 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6460 curl "${scriptUrl}" -o "${scriptZipFile}"
6461 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6462
6463 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6464 if (( $? > 0 )); then
6465 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6466 exit 1
6467 fi
6468
6469 if [ "${VERBOSE}" = "false" ]; then
6470 unzip -q "${scriptZipFile}"
6471 else
6472 unzip "${scriptZipFile}"
6473 fi
6474
6475 ./aws/install
6476
6477 # kubectl bash completion
6478 mkdir -p /etc/bash_completion.d
6479 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6480
6481 # kubectl zsh completion
6482 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6483 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6484 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6485 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6486 fi
6487
6488 rm -rf ./aws
6489 }
6490
6491 echo "(*) Installing AWS CLI..."
6492
6493 install
6494
6495 # Clean up
6496 rm -rf /var/lib/apt/lists/*
6497
6498 echo "Done!""#,
6499 ),
6500 ("./scripts/", r#""#),
6501 (
6502 "./scripts/fetch-latest-completer-scripts.sh",
6503 r#"
6504 #!/bin/bash
6505 #-------------------------------------------------------------------------------------------------------------
6506 # Copyright (c) Microsoft Corporation. All rights reserved.
6507 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6508 #-------------------------------------------------------------------------------------------------------------
6509 #
6510 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6511 # Maintainer: The Dev Container spec maintainers
6512 #
6513 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6514 #
6515 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6516 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6517 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6518
6519 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6520 chmod +x "$BASH_COMPLETER_SCRIPT"
6521
6522 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6523 chmod +x "$ZSH_COMPLETER_SCRIPT"
6524 "#,
6525 ),
6526 ("./scripts/vendor/", r#""#),
6527 (
6528 "./scripts/vendor/aws_bash_completer",
6529 r#"
6530 # Typically that would be added under one of the following paths:
6531 # - /etc/bash_completion.d
6532 # - /usr/local/etc/bash_completion.d
6533 # - /usr/share/bash-completion/completions
6534
6535 complete -C aws_completer aws
6536 "#,
6537 ),
6538 (
6539 "./scripts/vendor/aws_zsh_completer.sh",
6540 r#"
6541 # Source this file to activate auto completion for zsh using the bash
6542 # compatibility helper. Make sure to run `compinit` before, which should be
6543 # given usually.
6544 #
6545 # % source /path/to/zsh_complete.sh
6546 #
6547 # Typically that would be called somewhere in your .zshrc.
6548 #
6549 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6550 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6551 #
6552 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6553 #
6554 # zsh releases prior to that version do not export the required env variables!
6555
6556 autoload -Uz bashcompinit
6557 bashcompinit -i
6558
6559 _bash_complete() {
6560 local ret=1
6561 local -a suf matches
6562 local -x COMP_POINT COMP_CWORD
6563 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6564 local -x COMP_LINE="$words"
6565 local -A savejobstates savejobtexts
6566
6567 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6568 (( COMP_CWORD = CURRENT - 1))
6569 COMP_WORDS=( $words )
6570 BASH_VERSINFO=( 2 05b 0 1 release )
6571
6572 savejobstates=( ${(kv)jobstates} )
6573 savejobtexts=( ${(kv)jobtexts} )
6574
6575 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6576
6577 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6578
6579 if [[ -n $matches ]]; then
6580 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6581 compset -P '*/' && matches=( ${matches##*/} )
6582 compset -S '/*' && matches=( ${matches%%/*} )
6583 compadd -Q -f "${suf[@]}" -a matches && ret=0
6584 else
6585 compadd -Q "${suf[@]}" -a matches && ret=0
6586 fi
6587 fi
6588
6589 if (( ret )); then
6590 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6591 _default "${suf[@]}" && ret=0
6592 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6593 _directories "${suf[@]}" && ret=0
6594 fi
6595 fi
6596
6597 return ret
6598 }
6599
6600 complete -C aws_completer aws
6601 "#,
6602 ),
6603 ]).await;
6604
6605 return Ok(http::Response::builder()
6606 .status(200)
6607 .body(AsyncBody::from(response))
6608 .unwrap());
6609 }
6610
6611 Ok(http::Response::builder()
6612 .status(404)
6613 .body(http_client::AsyncBody::default())
6614 .unwrap())
6615 })
6616 }
6617}