1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use regex::Regex;
10
11use fs::Fs;
12use http_client::HttpClient;
13use util::{ResultExt, command::Command, normalize_path};
14
15use crate::{
16 DevContainerConfig, DevContainerContext,
17 command_json::{CommandRunner, DefaultCommandRunner},
18 devcontainer_api::{DevContainerError, DevContainerUp},
19 devcontainer_json::{
20 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
21 deserialize_devcontainer_json,
22 },
23 docker::{
24 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
25 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
26 },
27 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
28 get_oci_token,
29 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
30 safe_id_lower,
31};
32
/// Tracks how far the devcontainer configuration has been processed:
/// freshly deserialized, or already run through variable expansion.
enum ConfigStatus {
    /// Parsed straight from JSON; `${...}` variables are still literal text.
    Deserialized(DevContainer),
    /// Host-resolvable `${...}` variables have been expanded and the result re-parsed.
    VariableParsed(DevContainer),
}
37
/// A resolved docker-compose setup: the compose file paths passed to the
/// docker client plus the configuration derived from them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Absolute paths to every compose file (base files plus any generated override files).
    files: Vec<PathBuf>,
    // Merged configuration produced from `files` (presumably via `docker compose config` — see `docker_compose_manifest`).
    config: DockerComposeConfig,
}
43
/// Carries a parsed devcontainer configuration through feature download,
/// image build, and container startup.
struct DevContainerManifest {
    // Client used for OCI registry requests (feature tokens, manifests, layer tarballs).
    http_client: Arc<dyn HttpClient>,
    // Filesystem abstraction for reading configs and writing build artifacts.
    fs: Arc<dyn Fs>,
    // Docker operations (inspect, compose config/build, ...).
    docker_client: Arc<dyn DockerClient>,
    // Runs external commands; usage not visible in this chunk.
    command_runner: Arc<dyn CommandRunner>,
    // The devcontainer JSON exactly as read from disk, before any substitution.
    raw_config: String,
    // Parsed config plus how far variable expansion has progressed.
    config: ConfigStatus,
    // Host environment used for `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer JSON file.
    config_directory: PathBuf,
    // File name of the devcontainer JSON within `config_directory`.
    file_name: String,
    // Inspect data for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths/tags for the extended features build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Feature manifests downloaded for this container, in install order.
    features: Vec<FeatureManifest>,
}
/// Base directory for project workspaces inside the container — presumably the
/// fallback when no workspace folder is configured (not referenced in this
/// chunk; confirm against `remote_workspace_folder`).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
60impl DevContainerManifest {
61 async fn new(
62 context: &DevContainerContext,
63 environment: HashMap<String, String>,
64 docker_client: Arc<dyn DockerClient>,
65 command_runner: Arc<dyn CommandRunner>,
66 local_config: DevContainerConfig,
67 local_project_path: &Path,
68 ) -> Result<Self, DevContainerError> {
69 let config_path = local_project_path.join(local_config.config_path.clone());
70 log::debug!("parsing devcontainer json found in {:?}", &config_path);
71 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
72 log::error!("Unable to read devcontainer contents: {e}");
73 DevContainerError::DevContainerParseFailed
74 })?;
75
76 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
77
78 let devcontainer_directory = config_path.parent().ok_or_else(|| {
79 log::error!("Dev container file should be in a directory");
80 DevContainerError::NotInValidProject
81 })?;
82 let file_name = config_path
83 .file_name()
84 .and_then(|f| f.to_str())
85 .ok_or_else(|| {
86 log::error!("Dev container file has no file name, or is invalid unicode");
87 DevContainerError::DevContainerParseFailed
88 })?;
89
90 Ok(Self {
91 fs: context.fs.clone(),
92 http_client: context.http_client.clone(),
93 docker_client,
94 command_runner,
95 raw_config: devcontainer_contents,
96 config: ConfigStatus::Deserialized(devcontainer),
97 local_project_directory: local_project_path.to_path_buf(),
98 local_environment: environment,
99 config_directory: devcontainer_directory.to_path_buf(),
100 file_name: file_name.to_string(),
101 root_image: None,
102 features_build_info: None,
103 features: Vec::new(),
104 })
105 }
106
107 fn devcontainer_id(&self) -> String {
108 let mut labels = self.identifying_labels();
109 labels.sort_by_key(|(key, _)| *key);
110
111 let mut hasher = DefaultHasher::new();
112 for (key, value) in &labels {
113 key.hash(&mut hasher);
114 value.hash(&mut hasher);
115 }
116
117 format!("{:016x}", hasher.finish())
118 }
119
120 fn identifying_labels(&self) -> Vec<(&str, String)> {
121 let labels = vec![
122 (
123 "devcontainer.local_folder",
124 (self.local_project_directory.display()).to_string(),
125 ),
126 (
127 "devcontainer.config_file",
128 (self.config_file().display()).to_string(),
129 ),
130 ];
131 labels
132 }
133
134 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
135 let mut replaced_content = content
136 .replace("${devcontainerId}", &self.devcontainer_id())
137 .replace(
138 "${containerWorkspaceFolderBasename}",
139 &self.remote_workspace_base_name().unwrap_or_default(),
140 )
141 .replace(
142 "${localWorkspaceFolderBasename}",
143 &self.local_workspace_base_name()?,
144 )
145 .replace(
146 "${containerWorkspaceFolder}",
147 &self
148 .remote_workspace_folder()
149 .map(|path| path.display().to_string())
150 .unwrap_or_default()
151 .replace('\\', "/"),
152 )
153 .replace(
154 "${localWorkspaceFolder}",
155 &self.local_workspace_folder().replace('\\', "/"),
156 );
157 for (k, v) in &self.local_environment {
158 let find = format!("${{localEnv:{k}}}");
159 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
160 }
161
162 Ok(replaced_content)
163 }
164
165 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
166 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
167 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
168
169 self.config = ConfigStatus::VariableParsed(parsed_config);
170
171 Ok(())
172 }
173
174 fn runtime_remote_env(
175 &self,
176 container_env: &HashMap<String, String>,
177 ) -> Result<HashMap<String, String>, DevContainerError> {
178 let mut merged_remote_env = container_env.clone();
179 // HOME is user-specific, and we will often not run as the image user
180 merged_remote_env.remove("HOME");
181 if let Some(remote_env) = self.dev_container().remote_env.clone() {
182 let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
183 log::error!(
184 "Unexpected error serializing dev container remote_env: {e} - {:?}",
185 remote_env
186 );
187 DevContainerError::DevContainerParseFailed
188 })?;
189 for (k, v) in container_env {
190 raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
191 }
192 let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
193 .map_err(|e| {
194 log::error!(
195 "Unexpected error reserializing dev container remote env: {e} - {:?}",
196 &raw
197 );
198 DevContainerError::DevContainerParseFailed
199 })?;
200 for (k, v) in reserialized {
201 merged_remote_env.insert(k, v);
202 }
203 }
204 Ok(merged_remote_env)
205 }
206
    /// Full path to the devcontainer JSON file this manifest was loaded from.
    fn config_file(&self) -> PathBuf {
        self.config_directory.join(&self.file_name)
    }
210
211 fn dev_container(&self) -> &DevContainer {
212 match &self.config {
213 ConfigStatus::Deserialized(dev_container) => dev_container,
214 ConfigStatus::VariableParsed(dev_container) => dev_container,
215 }
216 }
217
218 async fn dockerfile_location(&self) -> Option<PathBuf> {
219 let dev_container = self.dev_container();
220 match dev_container.build_type() {
221 DevContainerBuildType::Image(_) => None,
222 DevContainerBuildType::Dockerfile(build) => {
223 Some(self.config_directory.join(&build.dockerfile))
224 }
225 DevContainerBuildType::DockerCompose => {
226 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
227 return None;
228 };
229 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
230 else {
231 return None;
232 };
233 main_service.build.and_then(|b| {
234 let compose_file = docker_compose_manifest.files.first()?;
235 resolve_compose_dockerfile(
236 compose_file,
237 b.context.as_deref(),
238 b.dockerfile.as_deref()?,
239 )
240 })
241 }
242 DevContainerBuildType::None => None,
243 }
244 }
245
246 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
247 let mut hasher = DefaultHasher::new();
248 let prefix = match &self.dev_container().name {
249 Some(name) => &safe_id_lower(name),
250 None => "zed-dc",
251 };
252 let prefix = prefix.get(..6).unwrap_or(prefix);
253
254 dockerfile_build_path.hash(&mut hasher);
255
256 let hash = hasher.finish();
257 format!("{}-{:x}-features", prefix, hash)
258 }
259
260 /// Gets the base image from the devcontainer with the following precedence:
261 /// - The devcontainer image if an image is specified
262 /// - The image sourced in the Dockerfile if a Dockerfile is specified
263 /// - The image sourced in the docker-compose main service, if one is specified
264 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
265 /// If no such image is available, return an error
266 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
267 match self.dev_container().build_type() {
268 DevContainerBuildType::Image(image) => {
269 return Ok(image);
270 }
271 DevContainerBuildType::Dockerfile(build) => {
272 let dockerfile_contents = self.expanded_dockerfile_content().await?;
273 return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
274 || {
275 log::error!("Unable to find base image in Dockerfile");
276 DevContainerError::DevContainerParseFailed
277 },
278 );
279 }
280 DevContainerBuildType::DockerCompose => {
281 let docker_compose_manifest = self.docker_compose_manifest().await?;
282 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
283
284 if let Some(_) = main_service
285 .build
286 .as_ref()
287 .and_then(|b| b.dockerfile.as_ref())
288 {
289 let dockerfile_contents = self.expanded_dockerfile_content().await?;
290 return image_from_dockerfile(
291 dockerfile_contents,
292 &main_service.build.as_ref().and_then(|b| b.target.clone()),
293 )
294 .ok_or_else(|| {
295 log::error!("Unable to find base image in Dockerfile");
296 DevContainerError::DevContainerParseFailed
297 });
298 }
299 if let Some(image) = &main_service.image {
300 return Ok(image.to_string());
301 }
302
303 log::error!("No valid base image found in docker-compose configuration");
304 return Err(DevContainerError::DevContainerParseFailed);
305 }
306 DevContainerBuildType::None => {
307 log::error!("Not a valid devcontainer config for build");
308 return Err(DevContainerError::NotInValidProject);
309 }
310 }
311 }
312
    /// Downloads each configured feature's OCI layer into a temp build context
    /// and writes the extended Dockerfile used to install them on top of the
    /// base image.
    ///
    /// On success, populates `self.root_image`, `self.features`, and
    /// `self.features_build_info` for the later build steps. Requires
    /// `parse_nonremote_vars` to have run first.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image the features will be layered onto.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // --- Phase 1: Set up scratch directories for the build context ---
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps successive runs from colliding.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // _CONTAINER_USER/_REMOTE_USER are exposed to feature install scripts
        // via a builtin env file staged alongside the feature content.
        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // --- Phase 2: Download each feature's OCI layer and stage its install files ---
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` disables the feature entirely.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Suffix with the install index so the same feature id can appear
            // more than once without clobbering its directory.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // NOTE(review): only the first layer is downloaded — assumes
            // devcontainer feature artifacts are single-layer; confirm.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain `${...}` variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the env file carrying the user-selected options, then a
            // wrapper script that runs the feature's install script with it.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // Non-compose builds always use BuildKit; compose builds only when the
        // client supports it.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the user's (possibly targeted) final stage so the extended
        // Dockerfile can build on top of it by a known name.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
570
    /// Renders the extended Dockerfile: the user's Dockerfile content (if any)
    /// followed by stages that copy the staged feature content into the image
    /// and run each feature's install layer.
    ///
    /// `use_buildkit` switches how feature content reaches the build: via an
    /// additional BuildKit build context, or via a pre-built content image.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping only applies on non-Windows hosts; default on.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's passwd entry (field 6 is the
        // home directory, extracted in the RUN below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content comes from a pre-built image stage
        // rather than an additional build context of the same name.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): unlike the feature env layers above, these ENV
            // lines are appended with no leading newline separator — confirm
            // `generate_dockerfile_env` output ends with a trailing newline.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
662
663 fn build_merged_resources(
664 &self,
665 base_image: DockerInspect,
666 ) -> Result<DockerBuildResources, DevContainerError> {
667 let dev_container = match &self.config {
668 ConfigStatus::Deserialized(_) => {
669 log::error!(
670 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
671 );
672 return Err(DevContainerError::DevContainerParseFailed);
673 }
674 ConfigStatus::VariableParsed(dev_container) => dev_container,
675 };
676 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
677
678 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
679
680 mounts.append(&mut feature_mounts);
681
682 let privileged = dev_container.privileged.unwrap_or(false)
683 || self.features.iter().any(|f| f.privileged());
684
685 let mut entrypoint_script_lines = vec![
686 "echo Container started".to_string(),
687 "trap \"exit 0\" 15".to_string(),
688 ];
689
690 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
691 entrypoint_script_lines.push(entrypoint.clone());
692 }
693 entrypoint_script_lines.append(&mut vec![
694 "exec \"$@\"".to_string(),
695 "while sleep 1 & wait $!; do :; done".to_string(),
696 ]);
697
698 Ok(DockerBuildResources {
699 image: base_image,
700 additional_mounts: mounts,
701 privileged,
702 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
703 })
704 }
705
706 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
707 if let ConfigStatus::Deserialized(_) = &self.config {
708 log::error!(
709 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
710 );
711 return Err(DevContainerError::DevContainerParseFailed);
712 }
713 let dev_container = self.dev_container();
714 match dev_container.build_type() {
715 DevContainerBuildType::Image(base_image) => {
716 let built_docker_image = self.build_docker_image().await?;
717
718 let built_docker_image = self
719 .update_remote_user_uid(built_docker_image, &base_image)
720 .await?;
721
722 let resources = self.build_merged_resources(built_docker_image)?;
723 Ok(DevContainerBuildResources::Docker(resources))
724 }
725 DevContainerBuildType::Dockerfile(_) => {
726 let built_docker_image = self.build_docker_image().await?;
727 let Some(features_build_info) = &self.features_build_info else {
728 log::error!(
729 "Can't attempt to build update UID dockerfile before initial docker build"
730 );
731 return Err(DevContainerError::DevContainerParseFailed);
732 };
733 let built_docker_image = self
734 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
735 .await?;
736
737 let resources = self.build_merged_resources(built_docker_image)?;
738 Ok(DevContainerBuildResources::Docker(resources))
739 }
740 DevContainerBuildType::DockerCompose => {
741 log::debug!("Using docker compose. Building extended compose files");
742 let docker_compose_resources = self.build_and_extend_compose_files().await?;
743
744 return Ok(DevContainerBuildResources::DockerCompose(
745 docker_compose_resources,
746 ));
747 }
748 DevContainerBuildType::None => {
749 return Err(DevContainerError::DevContainerParseFailed);
750 }
751 }
752 }
753
754 async fn run_dev_container(
755 &self,
756 build_resources: DevContainerBuildResources,
757 ) -> Result<DevContainerUp, DevContainerError> {
758 let ConfigStatus::VariableParsed(_) = &self.config else {
759 log::error!(
760 "Variables have not been parsed; cannot proceed with running the dev container"
761 );
762 return Err(DevContainerError::DevContainerParseFailed);
763 };
764 let running_container = match build_resources {
765 DevContainerBuildResources::DockerCompose(resources) => {
766 self.run_docker_compose(resources).await?
767 }
768 DevContainerBuildResources::Docker(resources) => {
769 self.run_docker_image(resources).await?
770 }
771 };
772
773 let remote_user = get_remote_user_from_config(&running_container, self)?;
774 let remote_workspace_folder = self.remote_workspace_folder()?;
775
776 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
777
778 Ok(DevContainerUp {
779 container_id: running_container.id,
780 remote_user,
781 remote_workspace_folder: remote_workspace_folder.display().to_string(),
782 extension_ids: self.extension_ids(),
783 remote_env,
784 })
785 }
786
787 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
788 let dev_container = match &self.config {
789 ConfigStatus::Deserialized(_) => {
790 log::error!(
791 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
792 );
793 return Err(DevContainerError::DevContainerParseFailed);
794 }
795 ConfigStatus::VariableParsed(dev_container) => dev_container,
796 };
797 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
798 return Err(DevContainerError::DevContainerParseFailed);
799 };
800 let docker_compose_full_paths = docker_compose_files
801 .iter()
802 .map(|relative| self.config_directory.join(relative))
803 .collect::<Vec<PathBuf>>();
804
805 let Some(config) = self
806 .docker_client
807 .get_docker_compose_config(&docker_compose_full_paths)
808 .await?
809 else {
810 log::error!("Output could not deserialize into DockerComposeConfig");
811 return Err(DevContainerError::DevContainerParseFailed);
812 };
813 Ok(DockerComposeResources {
814 files: docker_compose_full_paths,
815 config,
816 })
817 }
818
819 async fn build_and_extend_compose_files(
820 &self,
821 ) -> Result<DockerComposeResources, DevContainerError> {
822 let dev_container = match &self.config {
823 ConfigStatus::Deserialized(_) => {
824 log::error!(
825 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
826 );
827 return Err(DevContainerError::DevContainerParseFailed);
828 }
829 ConfigStatus::VariableParsed(dev_container) => dev_container,
830 };
831
832 let Some(features_build_info) = &self.features_build_info else {
833 log::error!(
834 "Cannot build and extend compose files: features build info is not yet constructed"
835 );
836 return Err(DevContainerError::DevContainerParseFailed);
837 };
838 let mut docker_compose_resources = self.docker_compose_manifest().await?;
839 let supports_buildkit = self.docker_client.supports_compose_buildkit();
840
841 let (main_service_name, main_service) =
842 find_primary_service(&docker_compose_resources, self)?;
843 let (built_service_image, built_service_image_tag) = if main_service
844 .build
845 .as_ref()
846 .map(|b| b.dockerfile.as_ref())
847 .is_some()
848 {
849 if !supports_buildkit {
850 self.build_feature_content_image().await?;
851 }
852
853 let dockerfile_path = &features_build_info.dockerfile_path;
854
855 let build_args = if !supports_buildkit {
856 HashMap::from([
857 (
858 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
859 "dev_container_auto_added_stage_label".to_string(),
860 ),
861 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
862 ])
863 } else {
864 HashMap::from([
865 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
866 (
867 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
868 "dev_container_auto_added_stage_label".to_string(),
869 ),
870 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
871 ])
872 };
873
874 let additional_contexts = if !supports_buildkit {
875 None
876 } else {
877 Some(HashMap::from([(
878 "dev_containers_feature_content_source".to_string(),
879 features_build_info
880 .features_content_dir
881 .display()
882 .to_string(),
883 )]))
884 };
885
886 let build_override = DockerComposeConfig {
887 name: None,
888 services: HashMap::from([(
889 main_service_name.clone(),
890 DockerComposeService {
891 image: Some(features_build_info.image_tag.clone()),
892 entrypoint: None,
893 cap_add: None,
894 security_opt: None,
895 labels: None,
896 build: Some(DockerComposeServiceBuild {
897 context: Some(
898 main_service
899 .build
900 .as_ref()
901 .and_then(|b| b.context.clone())
902 .unwrap_or_else(|| {
903 features_build_info.empty_context_dir.display().to_string()
904 }),
905 ),
906 dockerfile: Some(dockerfile_path.display().to_string()),
907 target: Some("dev_containers_target_stage".to_string()),
908 args: Some(build_args),
909 additional_contexts,
910 }),
911 volumes: Vec::new(),
912 ..Default::default()
913 },
914 )]),
915 volumes: HashMap::new(),
916 };
917
918 let temp_base = std::env::temp_dir().join("devcontainer-zed");
919 let config_location = temp_base.join("docker_compose_build.json");
920
921 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
922 log::error!("Error serializing docker compose runtime override: {e}");
923 DevContainerError::DevContainerParseFailed
924 })?;
925
926 self.fs
927 .write(&config_location, config_json.as_bytes())
928 .await
929 .map_err(|e| {
930 log::error!("Error writing the runtime override file: {e}");
931 DevContainerError::FilesystemError
932 })?;
933
934 docker_compose_resources.files.push(config_location);
935
936 self.docker_client
937 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
938 .await?;
939 (
940 self.docker_client
941 .inspect(&features_build_info.image_tag)
942 .await?,
943 &features_build_info.image_tag,
944 )
945 } else if let Some(image) = &main_service.image {
946 if dev_container
947 .features
948 .as_ref()
949 .is_none_or(|features| features.is_empty())
950 {
951 (self.docker_client.inspect(image).await?, image)
952 } else {
953 if !supports_buildkit {
954 self.build_feature_content_image().await?;
955 }
956
957 let dockerfile_path = &features_build_info.dockerfile_path;
958
959 let build_args = if !supports_buildkit {
960 HashMap::from([
961 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
962 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
963 ])
964 } else {
965 HashMap::from([
966 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
967 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
968 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
969 ])
970 };
971
972 let additional_contexts = if !supports_buildkit {
973 None
974 } else {
975 Some(HashMap::from([(
976 "dev_containers_feature_content_source".to_string(),
977 features_build_info
978 .features_content_dir
979 .display()
980 .to_string(),
981 )]))
982 };
983
984 let build_override = DockerComposeConfig {
985 name: None,
986 services: HashMap::from([(
987 main_service_name.clone(),
988 DockerComposeService {
989 image: Some(features_build_info.image_tag.clone()),
990 entrypoint: None,
991 cap_add: None,
992 security_opt: None,
993 labels: None,
994 build: Some(DockerComposeServiceBuild {
995 context: Some(
996 features_build_info.empty_context_dir.display().to_string(),
997 ),
998 dockerfile: Some(dockerfile_path.display().to_string()),
999 target: Some("dev_containers_target_stage".to_string()),
1000 args: Some(build_args),
1001 additional_contexts,
1002 }),
1003 volumes: Vec::new(),
1004 ..Default::default()
1005 },
1006 )]),
1007 volumes: HashMap::new(),
1008 };
1009
1010 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1011 let config_location = temp_base.join("docker_compose_build.json");
1012
1013 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1014 log::error!("Error serializing docker compose runtime override: {e}");
1015 DevContainerError::DevContainerParseFailed
1016 })?;
1017
1018 self.fs
1019 .write(&config_location, config_json.as_bytes())
1020 .await
1021 .map_err(|e| {
1022 log::error!("Error writing the runtime override file: {e}");
1023 DevContainerError::FilesystemError
1024 })?;
1025
1026 docker_compose_resources.files.push(config_location);
1027
1028 self.docker_client
1029 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1030 .await?;
1031
1032 (
1033 self.docker_client
1034 .inspect(&features_build_info.image_tag)
1035 .await?,
1036 &features_build_info.image_tag,
1037 )
1038 }
1039 } else {
1040 log::error!("Docker compose must have either image or dockerfile defined");
1041 return Err(DevContainerError::DevContainerParseFailed);
1042 };
1043
1044 let built_service_image = self
1045 .update_remote_user_uid(built_service_image, built_service_image_tag)
1046 .await?;
1047
1048 let resources = self.build_merged_resources(built_service_image)?;
1049
1050 let network_mode = main_service.network_mode.as_ref();
1051 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1052 let runtime_override_file = self
1053 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1054 .await?;
1055
1056 docker_compose_resources.files.push(runtime_override_file);
1057
1058 Ok(docker_compose_resources)
1059 }
1060
1061 async fn write_runtime_override_file(
1062 &self,
1063 main_service_name: &str,
1064 network_mode_service: Option<&str>,
1065 resources: DockerBuildResources,
1066 ) -> Result<PathBuf, DevContainerError> {
1067 let config =
1068 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1069 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1070 let config_location = temp_base.join("docker_compose_runtime.json");
1071
1072 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1073 log::error!("Error serializing docker compose runtime override: {e}");
1074 DevContainerError::DevContainerParseFailed
1075 })?;
1076
1077 self.fs
1078 .write(&config_location, config_json.as_bytes())
1079 .await
1080 .map_err(|e| {
1081 log::error!("Error writing the runtime override file: {e}");
1082 DevContainerError::FilesystemError
1083 })?;
1084
1085 Ok(config_location)
1086 }
1087
1088 fn build_runtime_override(
1089 &self,
1090 main_service_name: &str,
1091 network_mode_service: Option<&str>,
1092 resources: DockerBuildResources,
1093 ) -> Result<DockerComposeConfig, DevContainerError> {
1094 let mut runtime_labels = HashMap::new();
1095
1096 if let Some(metadata) = &resources.image.config.labels.metadata {
1097 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1098 log::error!("Error serializing docker image metadata: {e}");
1099 DevContainerError::ContainerNotValid(resources.image.id.clone())
1100 })?;
1101
1102 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1103 }
1104
1105 for (k, v) in self.identifying_labels() {
1106 runtime_labels.insert(k.to_string(), v.to_string());
1107 }
1108
1109 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1110 .additional_mounts
1111 .iter()
1112 .filter_map(|mount| {
1113 if let Some(mount_type) = &mount.mount_type
1114 && mount_type.to_lowercase() == "volume"
1115 && let Some(source) = &mount.source
1116 {
1117 Some((
1118 source.clone(),
1119 DockerComposeVolume {
1120 name: source.clone(),
1121 },
1122 ))
1123 } else {
1124 None
1125 }
1126 })
1127 .collect();
1128
1129 let volumes: Vec<MountDefinition> = resources
1130 .additional_mounts
1131 .iter()
1132 .map(|v| MountDefinition {
1133 source: v.source.clone(),
1134 target: v.target.clone(),
1135 mount_type: v.mount_type.clone(),
1136 })
1137 .collect();
1138
1139 let mut main_service = DockerComposeService {
1140 entrypoint: Some(vec![
1141 "/bin/sh".to_string(),
1142 "-c".to_string(),
1143 resources.entrypoint_script,
1144 "-".to_string(),
1145 ]),
1146 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1147 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1148 labels: Some(runtime_labels),
1149 volumes,
1150 privileged: Some(resources.privileged),
1151 ..Default::default()
1152 };
1153 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1154 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1155 if let Some(forward_ports) = &self.dev_container().forward_ports {
1156 let main_service_ports: Vec<String> = forward_ports
1157 .iter()
1158 .filter_map(|f| match f {
1159 ForwardPort::Number(port) => Some(port.to_string()),
1160 ForwardPort::String(port) => {
1161 let parts: Vec<&str> = port.split(":").collect();
1162 if parts.len() <= 1 {
1163 Some(port.to_string())
1164 } else if parts.len() == 2 {
1165 if parts[0] == main_service_name {
1166 Some(parts[1].to_string())
1167 } else {
1168 None
1169 }
1170 } else {
1171 None
1172 }
1173 }
1174 })
1175 .collect();
1176 for port in main_service_ports {
1177 // If the main service uses a different service's network bridge, append to that service's ports instead
1178 if let Some(network_service_name) = network_mode_service {
1179 if let Some(service) = service_declarations.get_mut(network_service_name) {
1180 service.ports.push(DockerComposeServicePort {
1181 target: port.clone(),
1182 published: port.clone(),
1183 ..Default::default()
1184 });
1185 } else {
1186 service_declarations.insert(
1187 network_service_name.to_string(),
1188 DockerComposeService {
1189 ports: vec![DockerComposeServicePort {
1190 target: port.clone(),
1191 published: port.clone(),
1192 ..Default::default()
1193 }],
1194 ..Default::default()
1195 },
1196 );
1197 }
1198 } else {
1199 main_service.ports.push(DockerComposeServicePort {
1200 target: port.clone(),
1201 published: port.clone(),
1202 ..Default::default()
1203 });
1204 }
1205 }
1206 let other_service_ports: Vec<(&str, &str)> = forward_ports
1207 .iter()
1208 .filter_map(|f| match f {
1209 ForwardPort::Number(_) => None,
1210 ForwardPort::String(port) => {
1211 let parts: Vec<&str> = port.split(":").collect();
1212 if parts.len() != 2 {
1213 None
1214 } else {
1215 if parts[0] == main_service_name {
1216 None
1217 } else {
1218 Some((parts[0], parts[1]))
1219 }
1220 }
1221 }
1222 })
1223 .collect();
1224 for (service_name, port) in other_service_ports {
1225 if let Some(service) = service_declarations.get_mut(service_name) {
1226 service.ports.push(DockerComposeServicePort {
1227 target: port.to_string(),
1228 published: port.to_string(),
1229 ..Default::default()
1230 });
1231 } else {
1232 service_declarations.insert(
1233 service_name.to_string(),
1234 DockerComposeService {
1235 ports: vec![DockerComposeServicePort {
1236 target: port.to_string(),
1237 published: port.to_string(),
1238 ..Default::default()
1239 }],
1240 ..Default::default()
1241 },
1242 );
1243 }
1244 }
1245 }
1246
1247 service_declarations.insert(main_service_name.to_string(), main_service);
1248 let new_docker_compose_config = DockerComposeConfig {
1249 name: None,
1250 services: service_declarations,
1251 volumes: config_volumes,
1252 };
1253
1254 Ok(new_docker_compose_config)
1255 }
1256
1257 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1258 let dev_container = match &self.config {
1259 ConfigStatus::Deserialized(_) => {
1260 log::error!(
1261 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1262 );
1263 return Err(DevContainerError::DevContainerParseFailed);
1264 }
1265 ConfigStatus::VariableParsed(dev_container) => dev_container,
1266 };
1267
1268 match dev_container.build_type() {
1269 DevContainerBuildType::Image(image_tag) => {
1270 let base_image = self.docker_client.inspect(&image_tag).await?;
1271 if dev_container
1272 .features
1273 .as_ref()
1274 .is_none_or(|features| features.is_empty())
1275 {
1276 log::debug!("No features to add. Using base image");
1277 return Ok(base_image);
1278 }
1279 }
1280 DevContainerBuildType::Dockerfile(_) => {}
1281 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1282 return Err(DevContainerError::DevContainerParseFailed);
1283 }
1284 };
1285
1286 let mut command = self.create_docker_build()?;
1287
1288 let output = self
1289 .command_runner
1290 .run_command(&mut command)
1291 .await
1292 .map_err(|e| {
1293 log::error!("Error building docker image: {e}");
1294 DevContainerError::CommandFailed(command.get_program().display().to_string())
1295 })?;
1296
1297 if !output.status.success() {
1298 let stderr = String::from_utf8_lossy(&output.stderr);
1299 log::error!("docker buildx build failed: {stderr}");
1300 return Err(DevContainerError::CommandFailed(
1301 command.get_program().display().to_string(),
1302 ));
1303 }
1304
1305 // After a successful build, inspect the newly tagged image to get its metadata
1306 let Some(features_build_info) = &self.features_build_info else {
1307 log::error!("Features build info expected, but not created");
1308 return Err(DevContainerError::DevContainerParseFailed);
1309 };
1310 let image = self
1311 .docker_client
1312 .inspect(&features_build_info.image_tag)
1313 .await?;
1314
1315 Ok(image)
1316 }
1317
    /// Windows variant: a no-op. There is no host UID/GID to mirror into the
    /// container on a Windows host, so the built image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1326 #[cfg(not(target_os = "windows"))]
1327 async fn update_remote_user_uid(
1328 &self,
1329 image: DockerInspect,
1330 base_image: &str,
1331 ) -> Result<DockerInspect, DevContainerError> {
1332 let dev_container = self.dev_container();
1333
1334 let Some(features_build_info) = &self.features_build_info else {
1335 return Ok(image);
1336 };
1337
1338 // updateRemoteUserUID defaults to true per the devcontainers spec
1339 if dev_container.update_remote_user_uid == Some(false) {
1340 return Ok(image);
1341 }
1342
1343 let remote_user = get_remote_user_from_config(&image, self)?;
1344 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1345 return Ok(image);
1346 }
1347
1348 let image_user = image
1349 .config
1350 .image_user
1351 .as_deref()
1352 .unwrap_or("root")
1353 .to_string();
1354
1355 let host_uid = Command::new("id")
1356 .arg("-u")
1357 .output()
1358 .await
1359 .map_err(|e| {
1360 log::error!("Failed to get host UID: {e}");
1361 DevContainerError::CommandFailed("id -u".to_string())
1362 })
1363 .and_then(|output| {
1364 String::from_utf8_lossy(&output.stdout)
1365 .trim()
1366 .parse::<u32>()
1367 .map_err(|e| {
1368 log::error!("Failed to parse host UID: {e}");
1369 DevContainerError::CommandFailed("id -u".to_string())
1370 })
1371 })?;
1372
1373 let host_gid = Command::new("id")
1374 .arg("-g")
1375 .output()
1376 .await
1377 .map_err(|e| {
1378 log::error!("Failed to get host GID: {e}");
1379 DevContainerError::CommandFailed("id -g".to_string())
1380 })
1381 .and_then(|output| {
1382 String::from_utf8_lossy(&output.stdout)
1383 .trim()
1384 .parse::<u32>()
1385 .map_err(|e| {
1386 log::error!("Failed to parse host GID: {e}");
1387 DevContainerError::CommandFailed("id -g".to_string())
1388 })
1389 })?;
1390
1391 let dockerfile_content = self.generate_update_uid_dockerfile();
1392
1393 let dockerfile_path = features_build_info
1394 .features_content_dir
1395 .join("updateUID.Dockerfile");
1396 self.fs
1397 .write(&dockerfile_path, dockerfile_content.as_bytes())
1398 .await
1399 .map_err(|e| {
1400 log::error!("Failed to write updateUID Dockerfile: {e}");
1401 DevContainerError::FilesystemError
1402 })?;
1403
1404 let updated_image_tag = features_build_info.image_tag.clone();
1405
1406 let mut command = Command::new(self.docker_client.docker_cli());
1407 command.args(["build"]);
1408 command.args(["-f", &dockerfile_path.display().to_string()]);
1409 command.args(["-t", &updated_image_tag]);
1410 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1411 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1412 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1413 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1414 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1415 command.arg(features_build_info.empty_context_dir.display().to_string());
1416
1417 let output = self
1418 .command_runner
1419 .run_command(&mut command)
1420 .await
1421 .map_err(|e| {
1422 log::error!("Error building UID update image: {e}");
1423 DevContainerError::CommandFailed(command.get_program().display().to_string())
1424 })?;
1425
1426 if !output.status.success() {
1427 let stderr = String::from_utf8_lossy(&output.stderr);
1428 log::error!("UID update build failed: {stderr}");
1429 return Err(DevContainerError::CommandFailed(
1430 command.get_program().display().to_string(),
1431 ));
1432 }
1433
1434 self.docker_client.inspect(&updated_image_tag).await
1435 }
1436
1437 #[cfg(not(target_os = "windows"))]
1438 fn generate_update_uid_dockerfile(&self) -> String {
1439 let mut dockerfile = r#"ARG BASE_IMAGE
1440FROM $BASE_IMAGE
1441
1442USER root
1443
1444ARG REMOTE_USER
1445ARG NEW_UID
1446ARG NEW_GID
1447SHELL ["/bin/sh", "-c"]
1448RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1449 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1450 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1451 if [ -z "$OLD_UID" ]; then \
1452 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1453 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1454 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1455 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1456 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1457 else \
1458 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1459 FREE_GID=65532; \
1460 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1461 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1462 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1463 fi; \
1464 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1465 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1466 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1467 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1468 fi; \
1469 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1470 fi;
1471
1472ARG IMAGE_USER
1473USER $IMAGE_USER
1474
1475# Ensure that /etc/profile does not clobber the existing path
1476RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1477"#.to_string();
1478 for feature in &self.features {
1479 let container_env_layer = feature.generate_dockerfile_env();
1480 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1481 }
1482
1483 if let Some(env) = &self.dev_container().container_env {
1484 for (key, value) in env {
1485 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1486 }
1487 }
1488 dockerfile
1489 }
1490
1491 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1492 let Some(features_build_info) = &self.features_build_info else {
1493 log::error!("Features build info not available for building feature content image");
1494 return Err(DevContainerError::DevContainerParseFailed);
1495 };
1496 let features_content_dir = &features_build_info.features_content_dir;
1497
1498 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1499 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1500
1501 self.fs
1502 .write(&dockerfile_path, dockerfile_content.as_bytes())
1503 .await
1504 .map_err(|e| {
1505 log::error!("Failed to write feature content Dockerfile: {e}");
1506 DevContainerError::FilesystemError
1507 })?;
1508
1509 let mut command = Command::new(self.docker_client.docker_cli());
1510 command.args([
1511 "build",
1512 "-t",
1513 "dev_container_feature_content_temp",
1514 "-f",
1515 &dockerfile_path.display().to_string(),
1516 &features_content_dir.display().to_string(),
1517 ]);
1518
1519 let output = self
1520 .command_runner
1521 .run_command(&mut command)
1522 .await
1523 .map_err(|e| {
1524 log::error!("Error building feature content image: {e}");
1525 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1526 })?;
1527
1528 if !output.status.success() {
1529 let stderr = String::from_utf8_lossy(&output.stderr);
1530 log::error!("Feature content image build failed: {stderr}");
1531 return Err(DevContainerError::CommandFailed(
1532 self.docker_client.docker_cli(),
1533 ));
1534 }
1535
1536 Ok(())
1537 }
1538
1539 fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1540 let dev_container = match &self.config {
1541 ConfigStatus::Deserialized(_) => {
1542 log::error!(
1543 "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1544 );
1545 return Err(DevContainerError::DevContainerParseFailed);
1546 }
1547 ConfigStatus::VariableParsed(dev_container) => dev_container,
1548 };
1549
1550 let Some(features_build_info) = &self.features_build_info else {
1551 log::error!(
1552 "Cannot create docker build command; features build info has not been constructed"
1553 );
1554 return Err(DevContainerError::DevContainerParseFailed);
1555 };
1556 let mut command = Command::new(self.docker_client.docker_cli());
1557
1558 command.args(["buildx", "build"]);
1559
1560 // --load is short for --output=docker, loading the built image into the local docker images
1561 command.arg("--load");
1562
1563 // BuildKit build context: provides the features content directory as a named context
1564 // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1565 command.args([
1566 "--build-context",
1567 &format!(
1568 "dev_containers_feature_content_source={}",
1569 features_build_info.features_content_dir.display()
1570 ),
1571 ]);
1572
1573 // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1574 if let Some(build_image) = &features_build_info.build_image {
1575 command.args([
1576 "--build-arg",
1577 &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1578 ]);
1579 } else {
1580 command.args([
1581 "--build-arg",
1582 "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1583 ]);
1584 }
1585
1586 command.args([
1587 "--build-arg",
1588 &format!(
1589 "_DEV_CONTAINERS_IMAGE_USER={}",
1590 self.root_image
1591 .as_ref()
1592 .and_then(|docker_image| docker_image.config.image_user.as_ref())
1593 .unwrap_or(&"root".to_string())
1594 ),
1595 ]);
1596
1597 command.args([
1598 "--build-arg",
1599 "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1600 ]);
1601
1602 if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1603 for (key, value) in args {
1604 command.args(["--build-arg", &format!("{}={}", key, value)]);
1605 }
1606 }
1607
1608 command.args(["--target", "dev_containers_target_stage"]);
1609
1610 command.args([
1611 "-f",
1612 &features_build_info.dockerfile_path.display().to_string(),
1613 ]);
1614
1615 command.args(["-t", &features_build_info.image_tag]);
1616
1617 if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
1618 command.arg(self.config_directory.display().to_string());
1619 } else {
1620 // Use an empty folder as the build context to avoid pulling in unneeded files.
1621 // The actual feature content is supplied via the BuildKit build context above.
1622 command.arg(features_build_info.empty_context_dir.display().to_string());
1623 }
1624
1625 Ok(command)
1626 }
1627
1628 async fn run_docker_compose(
1629 &self,
1630 resources: DockerComposeResources,
1631 ) -> Result<DockerInspect, DevContainerError> {
1632 let mut command = Command::new(self.docker_client.docker_cli());
1633 command.args(&["compose", "--project-name", &self.project_name()]);
1634 for docker_compose_file in resources.files {
1635 command.args(&["-f", &docker_compose_file.display().to_string()]);
1636 }
1637 command.args(&["up", "-d"]);
1638
1639 let output = self
1640 .command_runner
1641 .run_command(&mut command)
1642 .await
1643 .map_err(|e| {
1644 log::error!("Error running docker compose up: {e}");
1645 DevContainerError::CommandFailed(command.get_program().display().to_string())
1646 })?;
1647
1648 if !output.status.success() {
1649 let stderr = String::from_utf8_lossy(&output.stderr);
1650 log::error!("Non-success status from docker compose up: {}", stderr);
1651 return Err(DevContainerError::CommandFailed(
1652 command.get_program().display().to_string(),
1653 ));
1654 }
1655
1656 if let Some(docker_ps) = self.check_for_existing_container().await? {
1657 log::debug!("Found newly created dev container");
1658 return self.docker_client.inspect(&docker_ps.id).await;
1659 }
1660
1661 log::error!("Could not find existing container after docker compose up");
1662
1663 Err(DevContainerError::DevContainerParseFailed)
1664 }
1665
1666 async fn run_docker_image(
1667 &self,
1668 build_resources: DockerBuildResources,
1669 ) -> Result<DockerInspect, DevContainerError> {
1670 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1671
1672 let output = self
1673 .command_runner
1674 .run_command(&mut docker_run_command)
1675 .await
1676 .map_err(|e| {
1677 log::error!("Error running docker run: {e}");
1678 DevContainerError::CommandFailed(
1679 docker_run_command.get_program().display().to_string(),
1680 )
1681 })?;
1682
1683 if !output.status.success() {
1684 let std_err = String::from_utf8_lossy(&output.stderr);
1685 log::error!("Non-success status from docker run. StdErr: {std_err}");
1686 return Err(DevContainerError::CommandFailed(
1687 docker_run_command.get_program().display().to_string(),
1688 ));
1689 }
1690
1691 log::debug!("Checking for container that was started");
1692 let Some(docker_ps) = self.check_for_existing_container().await? else {
1693 log::error!("Could not locate container just created");
1694 return Err(DevContainerError::DevContainerParseFailed);
1695 };
1696 self.docker_client.inspect(&docker_ps.id).await
1697 }
1698
    /// The host-side project directory rendered as a display string; used as
    /// the default mount source and as a project-name fallback.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1702 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1703 self.local_project_directory
1704 .file_name()
1705 .map(|f| f.display().to_string())
1706 .ok_or(DevContainerError::DevContainerParseFailed)
1707 }
1708
1709 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1710 self.dev_container()
1711 .workspace_folder
1712 .as_ref()
1713 .map(|folder| PathBuf::from(folder))
1714 .or(Some(
1715 // We explicitly use "/" here, instead of PathBuf::join
1716 // because we want remote targets to use unix-style filepaths,
1717 // even on a Windows host
1718 PathBuf::from(format!(
1719 "{}/{}",
1720 DEFAULT_REMOTE_PROJECT_DIR,
1721 self.local_workspace_base_name()?
1722 )),
1723 ))
1724 .ok_or(DevContainerError::DevContainerParseFailed)
1725 }
1726 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1727 self.remote_workspace_folder().and_then(|f| {
1728 f.file_name()
1729 .map(|file_name| file_name.display().to_string())
1730 .ok_or(DevContainerError::DevContainerParseFailed)
1731 })
1732 }
1733
1734 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1735 if let Some(mount) = &self.dev_container().workspace_mount {
1736 return Ok(mount.clone());
1737 }
1738 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1739 return Err(DevContainerError::DevContainerParseFailed);
1740 };
1741
1742 Ok(MountDefinition {
1743 source: Some(self.local_workspace_folder()),
1744 // We explicitly use "/" here, instead of PathBuf::join
1745 // because we want the remote target to use unix-style filepaths,
1746 // even on a Windows host
1747 target: format!(
1748 "{}/{}",
1749 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1750 project_directory_name.display()
1751 ),
1752 mount_type: None,
1753 })
1754 }
1755
1756 fn create_docker_run_command(
1757 &self,
1758 build_resources: DockerBuildResources,
1759 ) -> Result<Command, DevContainerError> {
1760 let remote_workspace_mount = self.remote_workspace_mount()?;
1761
1762 let docker_cli = self.docker_client.docker_cli();
1763 let mut command = Command::new(&docker_cli);
1764
1765 command.arg("run");
1766
1767 if build_resources.privileged {
1768 command.arg("--privileged");
1769 }
1770
1771 let run_args = match &self.dev_container().run_args {
1772 Some(run_args) => run_args,
1773 None => &Vec::new(),
1774 };
1775
1776 for arg in run_args {
1777 command.arg(arg);
1778 }
1779
1780 let run_if_missing = {
1781 |arg_name: &str, arg: &str, command: &mut Command| {
1782 if !run_args
1783 .iter()
1784 .any(|arg| arg.strip_prefix(arg_name).is_some())
1785 {
1786 command.arg(arg);
1787 }
1788 }
1789 };
1790
1791 if &docker_cli == "podman" {
1792 run_if_missing(
1793 "--security-opt",
1794 "--security-opt=label=disable",
1795 &mut command,
1796 );
1797 run_if_missing("--userns", "--userns=keep-id", &mut command);
1798 }
1799
1800 run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
1801 command.arg("-d");
1802 command.arg("--mount");
1803 command.arg(remote_workspace_mount.to_string());
1804
1805 for mount in &build_resources.additional_mounts {
1806 command.arg("--mount");
1807 command.arg(mount.to_string());
1808 }
1809
1810 for (key, val) in self.identifying_labels() {
1811 command.arg("-l");
1812 command.arg(format!("{}={}", key, val));
1813 }
1814
1815 if let Some(metadata) = &build_resources.image.config.labels.metadata {
1816 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1817 log::error!("Problem serializing image metadata: {e}");
1818 DevContainerError::ContainerNotValid(build_resources.image.id.clone())
1819 })?;
1820 command.arg("-l");
1821 command.arg(format!(
1822 "{}={}",
1823 "devcontainer.metadata", serialized_metadata
1824 ));
1825 }
1826
1827 if let Some(forward_ports) = &self.dev_container().forward_ports {
1828 for port in forward_ports {
1829 if let ForwardPort::Number(port_number) = port {
1830 command.arg("-p");
1831 command.arg(format!("{port_number}:{port_number}"));
1832 }
1833 }
1834 }
1835 for app_port in &self.dev_container().app_port {
1836 command.arg("-p");
1837 command.arg(app_port);
1838 }
1839
1840 command.arg("--entrypoint");
1841 command.arg("/bin/sh");
1842 command.arg(&build_resources.image.id);
1843 command.arg("-c");
1844
1845 command.arg(build_resources.entrypoint_script);
1846 command.arg("-");
1847
1848 Ok(command)
1849 }
1850
1851 fn extension_ids(&self) -> Vec<String> {
1852 self.dev_container()
1853 .customizations
1854 .as_ref()
1855 .map(|c| c.zed.extensions.clone())
1856 .unwrap_or_default()
1857 }
1858
1859 async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1860 self.dev_container().validate_devcontainer_contents()?;
1861
1862 self.run_initialize_commands().await?;
1863
1864 self.download_feature_and_dockerfile_resources().await?;
1865
1866 let build_resources = self.build_resources().await?;
1867
1868 let devcontainer_up = self.run_dev_container(build_resources).await?;
1869
1870 self.run_remote_scripts(&devcontainer_up, true).await?;
1871
1872 Ok(devcontainer_up)
1873 }
1874
    /// Execute the devcontainer lifecycle scripts inside the container via
    /// `docker exec`, in spec order.
    ///
    /// When `new_container` is true (first boot), the create-time hooks run:
    /// onCreateCommand, updateContentCommand, postCreateCommand, and
    /// postStartCommand. postAttachCommand runs on every call, new container
    /// or not. Each command executes in the remote workspace folder as the
    /// remote user, and the first failure aborts the remaining scripts via `?`.
    ///
    /// NOTE(review): the five exec loops below are structurally identical; a
    /// shared helper would collapse them, but that needs the lifecycle-command
    /// type name, which isn't visible in this part of the file.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        // Scripts can only run once variables have been expanded into the config.
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // onCreateCommand: runs once, right after the container is created.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // updateContentCommand: refreshes workspace content after creation.
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // postCreateCommand: runs after create/update content complete.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // postStartCommand: runs each time the container starts; here it
            // only runs for fresh containers.
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand: runs on every attach, including reuse of an
        // existing container.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1962
1963 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1964 let ConfigStatus::VariableParsed(config) = &self.config else {
1965 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1966 return Err(DevContainerError::DevContainerParseFailed);
1967 };
1968
1969 if let Some(initialize_command) = &config.initialize_command {
1970 log::debug!("Running initialize command");
1971 initialize_command
1972 .run(&self.command_runner, &self.local_project_directory)
1973 .await
1974 } else {
1975 log::warn!("No initialize command found");
1976 Ok(())
1977 }
1978 }
1979
1980 async fn check_for_existing_devcontainer(
1981 &self,
1982 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1983 if let Some(docker_ps) = self.check_for_existing_container().await? {
1984 log::debug!("Dev container already found. Proceeding with it");
1985
1986 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1987
1988 if !docker_inspect.is_running() {
1989 log::debug!("Container not running. Will attempt to start, and then proceed");
1990 self.docker_client.start_container(&docker_ps.id).await?;
1991 }
1992
1993 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1994
1995 let remote_folder = self.remote_workspace_folder()?;
1996
1997 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1998
1999 let dev_container_up = DevContainerUp {
2000 container_id: docker_ps.id,
2001 remote_user: remote_user,
2002 remote_workspace_folder: remote_folder.display().to_string(),
2003 extension_ids: self.extension_ids(),
2004 remote_env,
2005 };
2006
2007 self.run_remote_scripts(&dev_container_up, false).await?;
2008
2009 Ok(Some(dev_container_up))
2010 } else {
2011 log::debug!("Existing container not found.");
2012
2013 Ok(None)
2014 }
2015 }
2016
2017 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2018 self.docker_client
2019 .find_process_by_filters(
2020 self.identifying_labels()
2021 .iter()
2022 .map(|(k, v)| format!("label={k}={v}"))
2023 .collect(),
2024 )
2025 .await
2026 }
2027
2028 fn project_name(&self) -> String {
2029 if let Some(name) = &self.dev_container().name {
2030 safe_id_lower(name)
2031 } else {
2032 let alternate_name = &self
2033 .local_workspace_base_name()
2034 .unwrap_or(self.local_workspace_folder());
2035 safe_id_lower(alternate_name)
2036 }
2037 }
2038
    /// Loads the configured Dockerfile and expands `${VAR}` references using,
    /// in precedence order, the devcontainer's `build.args` and then defaults
    /// collected from `ARG KEY=value` lines earlier in the file. Returns the
    /// expanded Dockerfile text joined with `\n`.
    ///
    /// Errors with `DevContainerParseFailed` for image-type configs (which
    /// have no Dockerfile) and `FilesystemError` when the file cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // ARG defaults seen so far; later lines may reference them.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches `KEY=` at the start of (or after whitespace inside) an ARG
        // directive; the value runs until the next `KEY=` match or end of line.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one layer of surrounding double quotes, if any.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2097}
2098
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// NOTE(review): semantics of `None` are decided by the consumer of this
    /// struct (outside this view) — presumably the base comes from the
    /// generated Dockerfile instead; confirm.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2117
2118pub(crate) async fn read_devcontainer_configuration(
2119 config: DevContainerConfig,
2120 context: &DevContainerContext,
2121 environment: HashMap<String, String>,
2122) -> Result<DevContainer, DevContainerError> {
2123 let docker = if context.use_podman {
2124 Docker::new("podman").await
2125 } else {
2126 Docker::new("docker").await
2127 };
2128 let mut dev_container = DevContainerManifest::new(
2129 context,
2130 environment,
2131 Arc::new(docker),
2132 Arc::new(DefaultCommandRunner::new()),
2133 config,
2134 &context.project_directory.as_ref(),
2135 )
2136 .await?;
2137 dev_container.parse_nonremote_vars()?;
2138 Ok(dev_container.dev_container().clone())
2139}
2140
2141pub(crate) async fn spawn_dev_container(
2142 context: &DevContainerContext,
2143 environment: HashMap<String, String>,
2144 config: DevContainerConfig,
2145 local_project_path: &Path,
2146) -> Result<DevContainerUp, DevContainerError> {
2147 let docker = if context.use_podman {
2148 Docker::new("podman").await
2149 } else {
2150 Docker::new("docker").await
2151 };
2152 let mut devcontainer_manifest = DevContainerManifest::new(
2153 context,
2154 environment,
2155 Arc::new(docker),
2156 Arc::new(DefaultCommandRunner::new()),
2157 config,
2158 local_project_path,
2159 )
2160 .await?;
2161
2162 devcontainer_manifest.parse_nonremote_vars()?;
2163
2164 log::debug!("Checking for existing container");
2165 if let Some(devcontainer) = devcontainer_manifest
2166 .check_for_existing_devcontainer()
2167 .await?
2168 {
2169 Ok(devcontainer)
2170 } else {
2171 log::debug!("Existing container not found. Building");
2172
2173 devcontainer_manifest.build_and_run().await
2174 }
2175}
2176
// Everything needed to assemble the `docker run` invocation for a
// single-container (non-compose) devcontainer.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect data for the image the container will be started from.
    image: DockerInspect,
    // Extra mounts beyond the workspace bind mount.
    additional_mounts: Vec<MountDefinition>,
    // Presumably forwarded as `--privileged` — confirm in create_docker_run_command.
    privileged: bool,
    // Keep-alive shell script passed to the container's `/bin/sh -c` entrypoint.
    entrypoint_script: String,
}
2184
// Build inputs, split by whether the devcontainer is docker-compose based or
// a single-container (image / Dockerfile) setup.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2190
2191fn find_primary_service(
2192 docker_compose: &DockerComposeResources,
2193 devcontainer: &DevContainerManifest,
2194) -> Result<(String, DockerComposeService), DevContainerError> {
2195 let Some(service_name) = &devcontainer.dev_container().service else {
2196 return Err(DevContainerError::DevContainerParseFailed);
2197 };
2198
2199 match docker_compose.config.services.get(service_name) {
2200 Some(service) => Ok((service_name.clone(), service.clone())),
2201 None => Err(DevContainerError::DevContainerParseFailed),
2202 }
2203}
2204
2205/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2206/// `dockerfile` is relative to the build `context`, and `context` is relative to
2207/// the compose file's directory.
2208fn resolve_compose_dockerfile(
2209 compose_file: &Path,
2210 context: Option<&str>,
2211 dockerfile: &str,
2212) -> Option<PathBuf> {
2213 let dockerfile = PathBuf::from(dockerfile);
2214 if dockerfile.is_absolute() {
2215 return Some(dockerfile);
2216 }
2217 let compose_dir = compose_file.parent()?;
2218 let context_dir = match context {
2219 Some(ctx) => {
2220 let ctx = PathBuf::from(ctx);
2221 if ctx.is_absolute() {
2222 ctx
2223 } else {
2224 normalize_path(&compose_dir.join(ctx))
2225 }
2226 }
2227 None => compose_dir.to_path_buf(),
2228 };
2229 Some(context_dir.join(dockerfile))
2230}
2231
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): the generated install wrapper sources env files relative to
/// this staging area — presumably one subdirectory per feature; confirm.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2235
/// Backslash-escapes every regex metacharacter in `input`, leaving all other
/// characters untouched.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut out, c| {
            if ".*+?^${}()|[]\\".contains(c) {
                out.push('\\');
            }
            out.push(c);
            out
        })
}
2247
/// Extracts the short feature ID from a full feature reference string.
///
/// A trailing `@digest` is always dropped; a trailing `:tag` is dropped only
/// when the colon follows the final `/` (so a registry port is preserved).
/// The last path segment of what remains is the ID.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    let without_version = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => {
            let last_slash = feature_ref.rfind('/');
            match feature_ref.rfind(':') {
                Some(colon) if last_slash.is_some_and(|slash| colon > slash) => {
                    &feature_ref[..colon]
                }
                _ => feature_ref,
            }
        }
    };
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2271
2272/// Generates a shell command that looks up a user's passwd entry.
2273///
2274/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2275/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2276fn get_ent_passwd_shell_command(user: &str) -> String {
2277 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2278 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2279 format!(
2280 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2281 shell = escaped_for_shell,
2282 re = escaped_for_regex,
2283 )
2284}
2285
2286/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2287///
2288/// Features listed in the override come first (in the specified order), followed
2289/// by any remaining features sorted lexicographically by their full reference ID.
2290fn resolve_feature_order<'a>(
2291 features: &'a HashMap<String, FeatureOptions>,
2292 override_order: &Option<Vec<String>>,
2293) -> Vec<(&'a String, &'a FeatureOptions)> {
2294 if let Some(order) = override_order {
2295 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2296 for ordered_id in order {
2297 if let Some((key, options)) = features.get_key_value(ordered_id) {
2298 ordered.push((key, options));
2299 }
2300 }
2301 let mut remaining: Vec<_> = features
2302 .iter()
2303 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2304 .collect();
2305 remaining.sort_by_key(|(id, _)| id.as_str());
2306 ordered.extend(remaining);
2307 ordered
2308 } else {
2309 let mut entries: Vec<_> = features.iter().collect();
2310 entries.sort_by_key(|(id, _)| id.as_str());
2311 entries
2312 }
2313}
2314
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `env_variables` holds the feature's option assignments, one per line; they
/// are echoed in the script's banner. Every interpolated value is shell-quoted
/// via `shlex::try_quote` so it cannot break out of the generated script.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent non-empty option lines for the banner output.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // `{{` / `}}` below are literal braces in the emitted shell script; the
    // EXIT trap reports the feature id on any non-zero exit. The script sources
    // the builtin and per-feature env files before running install.sh.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2373
2374fn dockerfile_inject_alias(
2375 dockerfile_content: &str,
2376 alias: &str,
2377 build_target: Option<String>,
2378) -> String {
2379 match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2380 Some(target) => format!(
2381 r#"{dockerfile_content}
2382FROM {target} AS {alias}"#
2383 ),
2384 None => dockerfile_content.to_string(),
2385 }
2386}
2387
/// Finds the image (or parent stage) referenced by a Dockerfile's `FROM` lines.
///
/// With `target` set, returns the base of the last stage declared
/// `AS <target>` (case-insensitively); otherwise the base of the last `FROM`
/// line. Returns `None` when no matching `FROM` line exists.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match &target {
            Some(target) => {
                // split_whitespace tolerates tabs and repeated spaces; a plain
                // `split(' ')` would yield empty tokens and mis-parse
                // `FROM  image AS stage`.
                let parts = from_line.split_whitespace().collect::<Vec<&str>>();
                if parts.len() >= 3
                    && parts.get(parts.len() - 2).unwrap_or(&"").to_lowercase() == "as"
                {
                    parts.last().unwrap_or(&"").to_lowercase() == target.to_lowercase()
                } else {
                    false
                }
            }
            None => true,
        })
        .and_then(|from_line| {
            from_line
                .split_whitespace()
                .collect::<Vec<&str>>()
                .get(1)
                .map(|s| s.to_string())
        })
}
2413
2414fn get_remote_user_from_config(
2415 docker_config: &DockerInspect,
2416 devcontainer: &DevContainerManifest,
2417) -> Result<String, DevContainerError> {
2418 if let DevContainer {
2419 remote_user: Some(user),
2420 ..
2421 } = &devcontainer.dev_container()
2422 {
2423 return Ok(user.clone());
2424 }
2425 if let Some(metadata) = &docker_config.config.labels.metadata {
2426 for metadatum in metadata {
2427 if let Some(remote_user) = metadatum.get("remoteUser") {
2428 if let Some(remote_user_str) = remote_user.as_str() {
2429 return Ok(remote_user_str.to_string());
2430 }
2431 }
2432 }
2433 }
2434 if let Some(image_user) = &docker_config.config.image_user {
2435 if !image_user.is_empty() {
2436 return Ok(image_user.to_string());
2437 }
2438 }
2439 Ok("root".to_string())
2440}
2441
2442// This should come from spec - see the docs
2443fn get_container_user_from_config(
2444 docker_config: &DockerInspect,
2445 devcontainer: &DevContainerManifest,
2446) -> Result<String, DevContainerError> {
2447 if let Some(user) = &devcontainer.dev_container().container_user {
2448 return Ok(user.to_string());
2449 }
2450 if let Some(metadata) = &docker_config.config.labels.metadata {
2451 for metadatum in metadata {
2452 if let Some(container_user) = metadatum.get("containerUser") {
2453 if let Some(container_user_str) = container_user.as_str() {
2454 return Ok(container_user_str.to_string());
2455 }
2456 }
2457 }
2458 }
2459 if let Some(image_user) = &docker_config.config.image_user {
2460 return Ok(image_user.to_string());
2461 }
2462
2463 Ok("root".to_string())
2464}
2465
2466#[cfg(test)]
2467mod test {
2468 use std::{
2469 collections::HashMap,
2470 ffi::OsStr,
2471 path::{Path, PathBuf},
2472 process::{ExitStatus, Output},
2473 sync::{Arc, Mutex},
2474 };
2475
2476 use async_trait::async_trait;
2477 use fs::{FakeFs, Fs};
2478 use gpui::{AppContext, TestAppContext};
2479 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2480 use project::{
2481 ProjectEnvironment,
2482 worktree_store::{WorktreeIdCounter, WorktreeStore},
2483 };
2484 use serde_json_lenient::Value;
2485 use util::{command::Command, paths::SanitizedPath};
2486
2487 #[cfg(not(target_os = "windows"))]
2488 use crate::docker::DockerComposeServicePort;
2489 use crate::{
2490 DevContainerConfig, DevContainerContext,
2491 command_json::CommandRunner,
2492 devcontainer_api::DevContainerError,
2493 devcontainer_json::MountDefinition,
2494 devcontainer_manifest::{
2495 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2496 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2497 image_from_dockerfile, resolve_compose_dockerfile,
2498 },
2499 docker::{
2500 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2501 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2502 DockerPs,
2503 },
2504 oci::TokenResponse,
2505 };
    // Fake local project path used throughout these tests; the Windows variant
    // exercises backslash handling in the variable-replacement assertions.
    #[cfg(not(target_os = "windows"))]
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
    #[cfg(target_os = "windows")]
    const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2510
    /// Builds an in-memory tar archive from `(path, contents)` pairs.
    /// An empty `content` string produces a directory entry instead of a file.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finish the archive and unwrap down to the raw byte buffer.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2541
2542 fn test_project_filename() -> String {
2543 PathBuf::from(TEST_PROJECT_PATH)
2544 .file_name()
2545 .expect("is valid")
2546 .display()
2547 .to_string()
2548 }
2549
    /// Writes `devcontainer_contents` to `.devcontainer/devcontainer.json`
    /// under the fake project root, then returns the default
    /// `DevContainerConfig` (which is expected to resolve to that standard
    /// location — confirm against default_config).
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2562
    // Handles to the fakes backing a manifest under test, returned so
    // individual tests can inspect or program them after construction.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2569
    /// Convenience wrapper around `init_devcontainer_manifest` that wires up
    /// all-default fakes (fresh fs, http client, docker, command runner) and
    /// an empty local environment.
    async fn init_default_devcontainer_manifest(
        cx: &mut TestAppContext,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let fs = FakeFs::new(cx.executor());
        let http_client = fake_http_client();
        let command_runner = Arc::new(TestCommandRunner::new());
        let docker = Arc::new(FakeDocker::new());
        let environment = HashMap::new();

        init_devcontainer_manifest(
            cx,
            fs,
            http_client,
            docker,
            command_runner,
            environment,
            devcontainer_contents,
        )
        .await
    }
2591
    /// Builds a `DevContainerManifest` over the supplied fakes: writes the
    /// devcontainer.json into the fake fs, constructs a test worktree and
    /// project environment, and returns both the manifest and the fake handles
    /// for later inspection.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep clones of every fake so the caller can assert against them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2634
    // `remoteUser` from devcontainer.json must take precedence over the
    // `remoteUser` recorded in the image's metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // The metadata label says "vsCode" — it must lose to the config's "root".
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2674
    // With no `remoteUser` in devcontainer.json, the user must come from the
    // image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2703
2704 #[test]
2705 fn should_extract_feature_id_from_references() {
2706 assert_eq!(
2707 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2708 "aws-cli"
2709 );
2710 assert_eq!(
2711 extract_feature_id("ghcr.io/devcontainers/features/go"),
2712 "go"
2713 );
2714 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2715 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2716 assert_eq!(
2717 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2718 "rust"
2719 );
2720 }
2721
2722 #[gpui::test]
2723 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2724 let mut metadata = HashMap::new();
2725 metadata.insert(
2726 "remoteUser".to_string(),
2727 serde_json_lenient::Value::String("vsCode".to_string()),
2728 );
2729
2730 let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2731 cx,
2732 r#"{
2733 "name": "TODO"
2734 }"#,
2735 )
2736 .await
2737 .unwrap();
2738 let build_resources = DockerBuildResources {
2739 image: DockerInspect {
2740 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2741 config: DockerInspectConfig {
2742 labels: DockerConfigLabels { metadata: None },
2743 image_user: None,
2744 env: Vec::new(),
2745 },
2746 mounts: None,
2747 state: None,
2748 },
2749 additional_mounts: vec![],
2750 privileged: false,
2751 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2752 };
2753 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2754
2755 assert!(docker_run_command.is_ok());
2756 let docker_run_command = docker_run_command.expect("ok");
2757
2758 assert_eq!(docker_run_command.get_program(), "docker");
2759 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2760 .join(".devcontainer")
2761 .join("devcontainer.json");
2762 let expected_config_file_label = expected_config_file_label.display();
2763 assert_eq!(
2764 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2765 vec![
2766 OsStr::new("run"),
2767 OsStr::new("--sig-proxy=false"),
2768 OsStr::new("-d"),
2769 OsStr::new("--mount"),
2770 OsStr::new(&format!(
2771 "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
2772 )),
2773 OsStr::new("-l"),
2774 OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
2775 OsStr::new("-l"),
2776 OsStr::new(&format!(
2777 "devcontainer.config_file={expected_config_file_label}"
2778 )),
2779 OsStr::new("--entrypoint"),
2780 OsStr::new("/bin/sh"),
2781 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2782 OsStr::new("-c"),
2783 OsStr::new(
2784 "
2785 echo Container started
2786 trap \"exit 0\" 15
2787 exec \"$@\"
2788 while sleep 1 & wait $!; do :; done
2789 "
2790 .trim()
2791 ),
2792 OsStr::new("-"),
2793 ]
2794 )
2795 }
2796
    // Covers the three `find_primary_service` outcomes: `service` missing from
    // the devcontainer config, `service` set but absent from the compose
    // config, and `service` present in both.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2856
    // End-to-end check of non-remote variable replacement when the workspace
    // mount/folder are defaulted (`/workspaces/<project>`): `devcontainerId`,
    // the workspace-folder variables, and `localEnv:` lookups in `remoteEnv`.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
        "#;
        // Seed a local environment so the ${localEnv:…} lookups can resolve.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2970
    // Variable replacement when `workspaceMount`/`workspaceFolder` are given
    // explicitly: container-workspace variables must be derived from the custom
    // folder rather than the default `/workspaces/<project>` layout, while the
    // local-workspace variables stay tied to the local project path.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
        // These are some external comments. serde_lenient should handle them
        {
            // These are some internal comments
            "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
            "name": "myDevContainer-${devcontainerId}",
            "remoteUser": "root",
            "remoteEnv": {
                "DEVCONTAINER_ID": "${devcontainerId}",
                "MYVAR2": "myvarothervalue",
                "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

            },
            "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
            "workspaceFolder": "/workspace/customfolder"
        }
        "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );
    }
3058
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        // End-to-end build of a Dockerfile-based devcontainer with two OCI
        // features. Verifies, in order: the generated Dockerfile.extended,
        // the generated updateUID.Dockerfile, the per-feature install wrapper
        // script, the exact `docker run` argv, and the env passed to execs.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture devcontainer.json (JSONC): Dockerfile build with build args,
        // explicit workspace mount, an extra named-volume mount, runArgs,
        // forwarded/app ports, all lifecycle commands, two features
        // (docker-in-docker, go), and vscode/zed/codespaces customizations.
        let given_devcontainer_contents = r#"
 /*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
 {
 "name": "cli-${devcontainerId}",
 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
 "build": {
 "dockerfile": "Dockerfile",
 "args": {
 "VARIANT": "18-bookworm",
 "FOO": "bar",
 },
 },
 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
 "workspaceFolder": "/workspace2",
 "mounts": [
 // Keep command history across instances
 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
 ],

 "runArgs": [
 "--cap-add=SYS_PTRACE",
 "--sig-proxy=true",
 ],

 "forwardPorts": [
 8082,
 8083,
 ],
 "appPort": [
 8084,
 "8085:8086",
 ],

 "containerEnv": {
 "VARIABLE_VALUE": "value",
 },

 "initializeCommand": "touch IAM.md",

 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

 "postCreateCommand": {
 "yarn": "yarn install",
 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
 },

 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

 "remoteUser": "node",

 "remoteEnv": {
 "PATH": "${containerEnv:PATH}:/some/other/path",
 "OTHER_ENV": "other_env_value"
 },

 "features": {
 "ghcr.io/devcontainers/features/docker-in-docker:2": {
 "moby": false,
 },
 "ghcr.io/devcontainers/features/go:1": {},
 },

 "customizations": {
 "vscode": {
 "extensions": [
 "dbaeumer.vscode-eslint",
 "GitHub.vscode-pull-request-github",
 ],
 },
 "zed": {
 "extensions": ["vue", "ruby"],
 },
 "codespaces": {
 "repositories": {
 "devcontainers/features": {
 "permissions": {
 "contents": "write",
 "workflows": "write",
 },
 },
 },
 },
 },
 }
 "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by the fixture's "build.dockerfile".
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
 "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Build the image (including features) and start the container.
        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions should be surfaced.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The features build must have written a Dockerfile.extended that
        // embeds the user Dockerfile and appends the feature-install stages.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateRemoteUserUID defaults on (non-Windows), so a UID-fixup
        // Dockerfile must also have been generated. Its trailing ENV lines
        // carry env contributed by the features plus containerEnv.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // The "go" feature gets its own install wrapper script; find the one
        // written under the go_* staging directory (not docker-in-docker's).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo ' GOLANGCILINTVERSION=latest
 VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // Verify the exact `docker run` invocation recorded by the test
        // command runner.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        // Expected argv: --privileged (from docker-in-docker feature) +
        // runArgs, the workspace/bashhistory/dind mounts, devcontainer labels,
        // ports from forwardPorts (8082/8083) and appPort (8084, 8085:8086),
        // and the keep-alive entrypoint.
        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every exec into the container must carry the resolved remoteEnv.
        // "${containerEnv:PATH}" expanded against the container's PATH
        // (presumably "/initial/path" in the fake docker client — see test deps)
        // before ":/some/other/path" was appended.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3434
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end build of a docker-compose-based devcontainer with two OCI
        // features. Verifies the generated Dockerfile.extended and
        // updateUID.Dockerfile, that the compose *build* override preserves
        // the original build context, and the full compose *runtime* override.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture devcontainer.json (JSONC): compose-based config with an
        // "app" service, features, and forwardPorts that target both the
        // default service (8083) and the "db" service.
        let given_devcontainer_contents = r#"
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
 {
 "features": {
 "ghcr.io/devcontainers/features/aws-cli:1": {},
 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
 },
 "name": "Rust and PostgreSQL",
 "dockerComposeFile": "docker-compose.yml",
 "service": "app",
 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

 // Features to add to the dev container. More info: https://containers.dev/features.
 // "features": {},

 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 "forwardPorts": [
 8083,
 "db:5432",
 "db:1234",
 ],

 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "rustc --version",

 // Configure tool-specific properties.
 // "customizations": {},

 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
 // "remoteUser": "root"
 }
 "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by "dockerComposeFile". The "app"
        // service builds from a local Dockerfile and shares the "db" service's
        // network (network_mode: service:db), which is why db ends up carrying
        // the forwarded ports in the runtime override below.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
 postgres-data:

services:
 app:
 build:
 context: .
 dockerfile: Dockerfile
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 volumes:
 - ../..:/workspaces:cached

 # Overrides default command so things don't shut down after the process ends.
 command: sleep infinity

 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
 network_mode: service:db

 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)

 db:
 image: postgres:14.1
 restart: unless-stopped
 volumes:
 - postgres-data:/var/lib/postgresql/data
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)
 "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Write the Dockerfile referenced by the app service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 && apt-get -y install clang lld \
 && apt-get autoremove -y && apt-get clean -y
 "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Build (including features) and bring the compose project up.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The features build must have produced a Dockerfile.extended that
        // embeds the service Dockerfile and appends the feature-install stages.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 && apt-get -y install clang lld \
 && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateRemoteUserUID defaults on (non-Windows), so the UID-fixup
        // Dockerfile is generated here too.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The compose *build* override must keep the original build context
        // ("." relative to the compose file), not rewrite it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The compose *runtime* override: "app" gets the keep-alive
        // entrypoint, docker-in-docker's cap/privilege/volume additions and
        // the devcontainer labels; "db" gets the published ports because app
        // runs with network_mode: service:db.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            // Named volume added by the docker-in-docker feature.
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        // All forwardPorts land here: app shares db's network.
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3756
3757 #[test]
3758 fn test_resolve_compose_dockerfile() {
3759 let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3760
3761 // Bug case (#53473): context ".." with relative dockerfile
3762 assert_eq!(
3763 resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3764 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3765 );
3766
3767 // Compose path containing ".." (as docker_compose_manifest() produces)
3768 assert_eq!(
3769 resolve_compose_dockerfile(
3770 Path::new("/project/.devcontainer/../docker-compose.yml"),
3771 Some("."),
3772 "docker/Dockerfile",
3773 ),
3774 Some(PathBuf::from("/project/docker/Dockerfile")),
3775 );
3776
3777 // Absolute dockerfile returned as-is
3778 assert_eq!(
3779 resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3780 Some(PathBuf::from("/absolute/Dockerfile")),
3781 );
3782
3783 // Absolute context used directly
3784 assert_eq!(
3785 resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3786 Some(PathBuf::from("/abs/context/Dockerfile")),
3787 );
3788
3789 // No context defaults to compose file's directory
3790 assert_eq!(
3791 resolve_compose_dockerfile(compose, None, "Dockerfile"),
3792 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3793 );
3794 }
3795
3796 #[gpui::test]
3797 async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
3798 cx.executor().allow_parking();
3799 env_logger::try_init().ok();
3800
3801 let given_devcontainer_contents = r#"
3802 {
3803 "name": "Test",
3804 "dockerComposeFile": "docker-compose-context-parent.yml",
3805 "service": "app",
3806 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
3807 }
3808 "#;
3809 let (_, mut devcontainer_manifest) =
3810 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3811 .await
3812 .unwrap();
3813
3814 devcontainer_manifest.parse_nonremote_vars().unwrap();
3815
3816 let expected = PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile");
3817 assert_eq!(
3818 devcontainer_manifest.dockerfile_location().await,
3819 Some(expected)
3820 );
3821 }
3822
3823 #[gpui::test]
3824 async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3825 cx: &mut TestAppContext,
3826 ) {
3827 cx.executor().allow_parking();
3828 env_logger::try_init().ok();
3829 let given_devcontainer_contents = r#"
3830 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3831 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3832 {
3833 "features": {
3834 "ghcr.io/devcontainers/features/aws-cli:1": {},
3835 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3836 },
3837 "name": "Rust and PostgreSQL",
3838 "dockerComposeFile": "docker-compose.yml",
3839 "service": "app",
3840 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3841
3842 // Features to add to the dev container. More info: https://containers.dev/features.
3843 // "features": {},
3844
3845 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3846 "forwardPorts": [
3847 8083,
3848 "db:5432",
3849 "db:1234",
3850 ],
3851 "updateRemoteUserUID": false,
3852 "appPort": "8084",
3853
3854 // Use 'postCreateCommand' to run commands after the container is created.
3855 // "postCreateCommand": "rustc --version",
3856
3857 // Configure tool-specific properties.
3858 // "customizations": {},
3859
3860 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3861 // "remoteUser": "root"
3862 }
3863 "#;
3864 let (test_dependencies, mut devcontainer_manifest) =
3865 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3866 .await
3867 .unwrap();
3868
3869 test_dependencies
3870 .fs
3871 .atomic_write(
3872 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3873 r#"
3874version: '3.8'
3875
3876volumes:
3877postgres-data:
3878
3879services:
3880app:
3881 build:
3882 context: .
3883 dockerfile: Dockerfile
3884 env_file:
3885 # Ensure that the variables in .env match the same variables in devcontainer.json
3886 - .env
3887
3888 volumes:
3889 - ../..:/workspaces:cached
3890
3891 # Overrides default command so things don't shut down after the process ends.
3892 command: sleep infinity
3893
3894 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3895 network_mode: service:db
3896
3897 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3898 # (Adding the "ports" property to this file will not forward from a Codespace.)
3899
3900db:
3901 image: postgres:14.1
3902 restart: unless-stopped
3903 volumes:
3904 - postgres-data:/var/lib/postgresql/data
3905 env_file:
3906 # Ensure that the variables in .env match the same variables in devcontainer.json
3907 - .env
3908
3909 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3910 # (Adding the "ports" property to this file will not forward from a Codespace.)
3911 "#.trim().to_string(),
3912 )
3913 .await
3914 .unwrap();
3915
3916 test_dependencies.fs.atomic_write(
3917 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3918 r#"
3919FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3920
3921# Include lld linker to improve build times either by using environment variable
3922# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3923RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3924&& apt-get -y install clang lld \
3925&& apt-get autoremove -y && apt-get clean -y
3926 "#.trim().to_string()).await.unwrap();
3927
3928 devcontainer_manifest.parse_nonremote_vars().unwrap();
3929
3930 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3931
3932 let files = test_dependencies.fs.files();
3933 let feature_dockerfile = files
3934 .iter()
3935 .find(|f| {
3936 f.file_name()
3937 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3938 })
3939 .expect("to be found");
3940 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3941 assert_eq!(
3942 &feature_dockerfile,
3943 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3944
3945FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3946
3947# Include lld linker to improve build times either by using environment variable
3948# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3949RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3950&& apt-get -y install clang lld \
3951&& apt-get autoremove -y && apt-get clean -y
3952FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3953
3954FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3955USER root
3956COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3957RUN chmod -R 0755 /tmp/build-features/
3958
3959FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3960
3961USER root
3962
3963RUN mkdir -p /tmp/dev-container-features
3964COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3965
3966RUN \
3967echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3968echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3969
3970
3971RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3972cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3973&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3974&& cd /tmp/dev-container-features/aws-cli_0 \
3975&& chmod +x ./devcontainer-features-install.sh \
3976&& ./devcontainer-features-install.sh \
3977&& rm -rf /tmp/dev-container-features/aws-cli_0
3978
3979RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3980cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3981&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3982&& cd /tmp/dev-container-features/docker-in-docker_1 \
3983&& chmod +x ./devcontainer-features-install.sh \
3984&& ./devcontainer-features-install.sh \
3985&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3986
3987
3988ARG _DEV_CONTAINERS_IMAGE_USER=root
3989USER $_DEV_CONTAINERS_IMAGE_USER
3990
3991# Ensure that /etc/profile does not clobber the existing path
3992RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3993
3994
3995ENV DOCKER_BUILDKIT=1
3996"#
3997 );
3998 }
3999
    // End-to-end check of the Docker Compose code path when the container
    // runtime reports itself as Podman rather than Docker.
    //
    // Podman (without BuildKit) cannot use `RUN --mount=type=bind`, so the
    // generated `Dockerfile.extended` must materialize feature content with
    // plain `COPY --from=...` layers instead; this test pins that output
    // byte-for-byte, along with the `updateUID.Dockerfile` artifact.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: a compose-based project ("service": "app")
        // that also requests two OCI features (aws-cli, docker-in-docker).
        let given_devcontainer_contents = r#"
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
 {
 "features": {
 "ghcr.io/devcontainers/features/aws-cli:1": {},
 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
 },
 "name": "Rust and PostgreSQL",
 "dockerComposeFile": "docker-compose.yml",
 "service": "app",
 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

 // Features to add to the dev container. More info: https://containers.dev/features.
 // "features": {},

 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 // "forwardPorts": [5432],

 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "rustc --version",

 // Configure tool-specific properties.
 // "customizations": {},

 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
 // "remoteUser": "root"
 }
 "#;
        // Podman mode flipped on explicitly: this is the behavior under test.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Compose file referenced by the fixture's "dockerComposeFile".
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
 context: .
 dockerfile: Dockerfile
env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

volumes:
 - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
 - postgres-data:/var/lib/postgresql/data
env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
 "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Dockerfile used by the "app" service's build section above.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
 "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Drive the full build/run pipeline; artifacts land on the fake fs.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Expected Podman-flavoured output: feature content is brought in via
        // a `dev_container_feature_content_temp` stage and `COPY --from=...`
        // layers; no BuildKit `RUN --mount=type=bind` instructions appear.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // UID/GID-remapping Dockerfile generated for this configuration
        // (no `updateRemoteUserUID` override in the fixture).
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4226
4227 #[gpui::test]
4228 async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4229 cx.executor().allow_parking();
4230 env_logger::try_init().ok();
4231 let given_devcontainer_contents = r#"
4232 /*---------------------------------------------------------------------------------------------
4233 * Copyright (c) Microsoft Corporation. All rights reserved.
4234 * Licensed under the MIT License. See License.txt in the project root for license information.
4235 *--------------------------------------------------------------------------------------------*/
4236 {
4237 "name": "cli-${devcontainerId}",
4238 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4239 "build": {
4240 "dockerfile": "Dockerfile",
4241 "args": {
4242 "VARIANT": "18-bookworm",
4243 "FOO": "bar",
4244 },
4245 "target": "development",
4246 },
4247 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4248 "workspaceFolder": "/workspace2",
4249 "mounts": [
4250 // Keep command history across instances
4251 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4252 ],
4253
4254 "forwardPorts": [
4255 8082,
4256 8083,
4257 ],
4258 "appPort": "8084",
4259 "updateRemoteUserUID": false,
4260
4261 "containerEnv": {
4262 "VARIABLE_VALUE": "value",
4263 },
4264
4265 "initializeCommand": "touch IAM.md",
4266
4267 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4268
4269 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4270
4271 "postCreateCommand": {
4272 "yarn": "yarn install",
4273 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4274 },
4275
4276 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4277
4278 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4279
4280 "remoteUser": "node",
4281
4282 "remoteEnv": {
4283 "PATH": "${containerEnv:PATH}:/some/other/path",
4284 "OTHER_ENV": "other_env_value"
4285 },
4286
4287 "features": {
4288 "ghcr.io/devcontainers/features/docker-in-docker:2": {
4289 "moby": false,
4290 },
4291 "ghcr.io/devcontainers/features/go:1": {},
4292 },
4293
4294 "customizations": {
4295 "vscode": {
4296 "extensions": [
4297 "dbaeumer.vscode-eslint",
4298 "GitHub.vscode-pull-request-github",
4299 ],
4300 },
4301 "zed": {
4302 "extensions": ["vue", "ruby"],
4303 },
4304 "codespaces": {
4305 "repositories": {
4306 "devcontainers/features": {
4307 "permissions": {
4308 "contents": "write",
4309 "workflows": "write",
4310 },
4311 },
4312 },
4313 },
4314 },
4315 }
4316 "#;
4317
4318 let (test_dependencies, mut devcontainer_manifest) =
4319 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4320 .await
4321 .unwrap();
4322
4323 test_dependencies
4324 .fs
4325 .atomic_write(
4326 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4327 r#"
4328# Copyright (c) Microsoft Corporation. All rights reserved.
4329# Licensed under the MIT License. See License.txt in the project root for license information.
4330ARG VARIANT="16-bullseye"
4331FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4332FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4333
4334RUN mkdir -p /workspaces && chown node:node /workspaces
4335
4336ARG USERNAME=node
4337USER $USERNAME
4338
4339# Save command line history
4340RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4341&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4342&& mkdir -p /home/$USERNAME/commandhistory \
4343&& touch /home/$USERNAME/commandhistory/.bash_history \
4344&& chown -R $USERNAME /home/$USERNAME/commandhistory
4345 "#.trim().to_string(),
4346 )
4347 .await
4348 .unwrap();
4349
4350 devcontainer_manifest.parse_nonremote_vars().unwrap();
4351
4352 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4353
4354 assert_eq!(
4355 devcontainer_up.extension_ids,
4356 vec!["vue".to_string(), "ruby".to_string()]
4357 );
4358
4359 let files = test_dependencies.fs.files();
4360 let feature_dockerfile = files
4361 .iter()
4362 .find(|f| {
4363 f.file_name()
4364 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4365 })
4366 .expect("to be found");
4367 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4368 assert_eq!(
4369 &feature_dockerfile,
4370 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4371
4372# Copyright (c) Microsoft Corporation. All rights reserved.
4373# Licensed under the MIT License. See License.txt in the project root for license information.
4374ARG VARIANT="16-bullseye"
4375FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4376FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4377
4378RUN mkdir -p /workspaces && chown node:node /workspaces
4379
4380ARG USERNAME=node
4381USER $USERNAME
4382
4383# Save command line history
4384RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4385&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4386&& mkdir -p /home/$USERNAME/commandhistory \
4387&& touch /home/$USERNAME/commandhistory/.bash_history \
4388&& chown -R $USERNAME /home/$USERNAME/commandhistory
4389FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4390
4391FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4392USER root
4393COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4394RUN chmod -R 0755 /tmp/build-features/
4395
4396FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4397
4398USER root
4399
4400RUN mkdir -p /tmp/dev-container-features
4401COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4402
4403RUN \
4404echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4405echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4406
4407
4408RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4409cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4410&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4411&& cd /tmp/dev-container-features/docker-in-docker_0 \
4412&& chmod +x ./devcontainer-features-install.sh \
4413&& ./devcontainer-features-install.sh \
4414&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4415
4416RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4417cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4418&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4419&& cd /tmp/dev-container-features/go_1 \
4420&& chmod +x ./devcontainer-features-install.sh \
4421&& ./devcontainer-features-install.sh \
4422&& rm -rf /tmp/dev-container-features/go_1
4423
4424
4425ARG _DEV_CONTAINERS_IMAGE_USER=root
4426USER $_DEV_CONTAINERS_IMAGE_USER
4427
4428# Ensure that /etc/profile does not clobber the existing path
4429RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4430
4431ENV DOCKER_BUILDKIT=1
4432
4433ENV GOPATH=/go
4434ENV GOROOT=/usr/local/go
4435ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4436ENV VARIABLE_VALUE=value
4437"#
4438 );
4439
4440 let golang_install_wrapper = files
4441 .iter()
4442 .find(|f| {
4443 f.file_name()
4444 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4445 && f.to_str().is_some_and(|s| s.contains("go_"))
4446 })
4447 .expect("to be found");
4448 let golang_install_wrapper = test_dependencies
4449 .fs
4450 .load(golang_install_wrapper)
4451 .await
4452 .unwrap();
4453 assert_eq!(
4454 &golang_install_wrapper,
4455 r#"#!/bin/sh
4456set -e
4457
4458on_exit () {
4459 [ $? -eq 0 ] && exit
4460 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4461}
4462
4463trap on_exit EXIT
4464
4465echo ===========================================================================
4466echo 'Feature : go'
4467echo 'Id : ghcr.io/devcontainers/features/go:1'
4468echo 'Options :'
4469echo ' GOLANGCILINTVERSION=latest
4470 VERSION=latest'
4471echo ===========================================================================
4472
4473set -a
4474. ../devcontainer-features.builtin.env
4475. ./devcontainer-features.env
4476set +a
4477
4478chmod +x ./install.sh
4479./install.sh
4480"#
4481 );
4482
4483 let docker_commands = test_dependencies
4484 .command_runner
4485 .commands_by_program("docker");
4486
4487 let docker_run_command = docker_commands
4488 .iter()
4489 .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4490
4491 assert!(docker_run_command.is_some());
4492
4493 let docker_exec_commands = test_dependencies
4494 .docker
4495 .exec_commands_recorded
4496 .lock()
4497 .unwrap();
4498
4499 assert!(docker_exec_commands.iter().all(|exec| {
4500 exec.env
4501 == HashMap::from([
4502 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4503 (
4504 "PATH".to_string(),
4505 "/initial/path:/some/other/path".to_string(),
4506 ),
4507 ])
4508 }))
4509 }
4510
    // Non-Windows variant: building from a bare "image" entry (no Dockerfile,
    // no compose) must still generate the UID/GID-remapping Dockerfile, since
    // the fixture does not set "updateRemoteUserUID": false. The artifact is
    // pinned byte-for-byte below.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Minimal configuration: a name and a plain image reference.
        let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "image": "test_image:latest",
 }
 "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Run the pipeline for its side effects on the fake filesystem.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4585
4586 #[cfg(target_os = "windows")]
4587 #[gpui::test]
4588 async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4589 cx.executor().allow_parking();
4590 env_logger::try_init().ok();
4591 let given_devcontainer_contents = r#"
4592 {
4593 "name": "cli-${devcontainerId}",
4594 "image": "test_image:latest",
4595 }
4596 "#;
4597
4598 let (_, mut devcontainer_manifest) =
4599 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4600 .await
4601 .unwrap();
4602
4603 devcontainer_manifest.parse_nonremote_vars().unwrap();
4604
4605 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4606
4607 assert_eq!(
4608 devcontainer_up.remote_workspace_folder,
4609 "/workspaces/project"
4610 );
4611 }
4612
    // Compose-based setup whose "app" service uses a plain image (no build
    // section). The UID/GID-remapping Dockerfile must still be generated;
    // its contents are pinned byte-for-byte below.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture: compose file reference plus the service to attach to.
        let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "dockerComposeFile": "docker-compose-plain.yml",
 "service": "app",
 }
 "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose file referenced by the fixture; the service uses a plain
        // image rather than a build section.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
 app:
 image: test_image:latest
 command: sleep infinity
 volumes:
 - ..:/workspace:cached
 "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Run the pipeline for its side effects on the fake filesystem.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4706
4707 #[gpui::test]
4708 async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4709 cx.executor().allow_parking();
4710 env_logger::try_init().ok();
4711 let given_devcontainer_contents = r#"
4712 {
4713 "name": "cli-${devcontainerId}",
4714 "build": {
4715 "dockerfile": "Dockerfile",
4716 "args": {
4717 "VERSION": "1.22",
4718 }
4719 },
4720 }
4721 "#;
4722
4723 let (test_dependencies, mut devcontainer_manifest) =
4724 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4725 .await
4726 .unwrap();
4727
4728 test_dependencies
4729 .fs
4730 .atomic_write(
4731 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4732 r#"
4733FROM dontgrabme as build_context
4734ARG VERSION=1.21
4735ARG REPOSITORY=mybuild
4736ARG REGISTRY=docker.io/stuff
4737
4738ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4739
4740FROM ${IMAGE} AS devcontainer
4741 "#
4742 .trim()
4743 .to_string(),
4744 )
4745 .await
4746 .unwrap();
4747
4748 devcontainer_manifest.parse_nonremote_vars().unwrap();
4749
4750 let dockerfile_contents = devcontainer_manifest
4751 .expanded_dockerfile_content()
4752 .await
4753 .unwrap();
4754 let base_image = image_from_dockerfile(
4755 dockerfile_contents,
4756 &devcontainer_manifest
4757 .dev_container()
4758 .build
4759 .as_ref()
4760 .and_then(|b| b.target.clone()),
4761 )
4762 .unwrap();
4763
4764 assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4765 }
4766
    /// Verifies that `image_from_dockerfile` honors `build.target` from the
    /// devcontainer config: the base image must come from the stage named
    /// `development` (which uses `DEV_IMAGE`, i.e. the `:latest` tag), not
    /// from the first or last `FROM` stage of the Dockerfile.
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json that points at a multi-stage Dockerfile and pins
        // the build target to the "development" stage.
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                },
                "target": "development"
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Multi-stage Dockerfile: the "development" stage resolves to the
        // `:latest` tag, "production" to the VERSION-derived tag, and the
        // first FROM ("dontgrabme") must never be picked as the base image.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest

FROM ${DEV_IMAGE} AS development
FROM ${IMAGE} AS production
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Expand ARG references, then resolve the base image for the
        // configured target stage.
        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        // "development" uses DEV_IMAGE => the :latest tag wins.
        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
    }
4829
    /// Exercises ARG substitution in `expanded_dockerfile_content`:
    /// - forward references stay literal (`${OTP_VERSION}` used before its
    ///   declaration is not expanded),
    /// - substitution values come from devcontainer.json `build.args` when
    ///   present (IMAGE expands with `1.21`, not the Dockerfile default
    ///   `1.20.0-rc.4`), while ARG declaration lines without variable
    ///   references are left exactly as written,
    /// - multiple ARGs on one line and chained substitutions both work,
    /// - values containing JSON-like braces/quotes are substituted verbatim.
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Build args here override/extend the Dockerfile's own ARG defaults.
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // The first line keeps ${OTP_VERSION} literal (forward reference);
        // every reference to an already-declared ARG is expanded, with the
        // devcontainer.json value for ELIXIR_VERSION (1.21) taking precedence.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
                "#
            .trim()
        )
    }
4902
    // TODO(review): empty placeholder — passes vacuously. Implement the
    // assertion for a Dockerfile that already declares stage aliases, or
    // remove the stub so it doesn't suggest false coverage.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4905
    // TODO(review): empty placeholder — passes vacuously. Implement the
    // assertion for a Dockerfile without stage aliases, or remove the stub.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4908
    // TODO(review): empty placeholder — passes vacuously. Implement the
    // assertion for aliasing when a build target is specified, or remove
    // the stub.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4911
    /// One `run_docker_exec` invocation captured by `FakeDocker`, so tests
    /// can assert on what would have been executed. Fields prefixed with `_`
    /// are recorded but not currently inspected by any test.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment handed to the exec'd command; tests assert on this.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4919
    /// Test double for `DockerClient`: serves canned `inspect`/compose
    /// responses and records `exec` invocations instead of talking to a real
    /// Docker daemon.
    pub(crate) struct FakeDocker {
        // Every run_docker_exec call is appended here for later assertions.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as the podman CLI.
        podman: bool,
        // Whether the fake reports buildx (and thus BuildKit) as available.
        has_buildx: bool,
    }
4925
4926 impl FakeDocker {
4927 pub(crate) fn new() -> Self {
4928 Self {
4929 podman: false,
4930 has_buildx: true,
4931 exec_commands_recorded: Mutex::new(Vec::new()),
4932 }
4933 }
4934 #[cfg(not(target_os = "windows"))]
4935 fn set_podman(&mut self, podman: bool) {
4936 self.podman = podman;
4937 }
4938 }
4939
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Returns canned inspect data keyed on the well-known image names and
        /// container-id prefixes used by the tests; any other id fails with
        /// `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Base image for the typescript fixtures: remoteUser=node.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Base image for the rust fixtures: remoteUser=vscode.
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Containers named with the "cli_" prefix: carries a PATH entry so
            // tests can check env merging.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Container returned by find_process_by_filters below; the only
            // canned response that includes a bind mount.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Containers named with the "rust_a-" prefix: remoteUser=vscode.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Image referenced by the docker-compose-plain.yml fixture.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            // Anything else is treated as if docker can't answer.
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns a canned parsed compose config for three known fixture
        /// paths (single compose file each); anything else errors.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            let project_path = PathBuf::from(TEST_PROJECT_PATH);
            // Fixture 1: the project's default docker-compose.yml — an "app"
            // service built from a Dockerfile (sharing the db's network
            // namespace) plus a "db" postgres service with a named volume.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(
                        &project_path
                            .join(".devcontainer")
                            .join("docker-compose.yml"),
                    )
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                    target: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // Fixture 2: a compose file whose build context is the parent
            // directory (Dockerfile addressed relative to that context).
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose-context-parent.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            build: Some(DockerComposeServiceBuild {
                                context: Some("..".to_string()),
                                dockerfile: Some(".devcontainer/Dockerfile".to_string()),
                                args: None,
                                additional_contexts: None,
                                target: None,
                            }),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                }));
            }
            // Fixture 3: image-based service, no build section.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// No-op: compose builds always "succeed" in tests.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec invocation (for later assertions) instead of
        /// running anything; always succeeds.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Always fails: tests never start a real container.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Always "finds" the container whose inspect data carries a mount
        /// (see the "found_docker_ps" branch of `inspect`).
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        /// BuildKit is only reported for docker (not podman) with buildx.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman && self.has_buildx
        }
        /// Reports the CLI name matching the configured `podman` flag.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
5207
    /// A command captured by `TestCommandRunner`: the program name and its
    /// argument list, cloneable so tests can filter and assert on it.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5213
    /// Test double for `CommandRunner` that records every command instead of
    /// spawning a process (see the `CommandRunner` impl below).
    pub(crate) struct TestCommandRunner {
        // All commands passed to run_command, in invocation order.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5217
5218 impl TestCommandRunner {
5219 fn new() -> Self {
5220 Self {
5221 commands_recorded: Mutex::new(Vec::new()),
5222 }
5223 }
5224
5225 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5226 let record = self.commands_recorded.lock().expect("poisoned");
5227 record
5228 .iter()
5229 .filter(|r| r.program == program)
5230 .map(|r| r.clone())
5231 .collect()
5232 }
5233 }
5234
5235 #[async_trait]
5236 impl CommandRunner for TestCommandRunner {
5237 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5238 let mut record = self.commands_recorded.lock().expect("poisoned");
5239
5240 record.push(TestCommand {
5241 program: command.get_program().display().to_string(),
5242 args: command
5243 .get_args()
5244 .map(|a| a.display().to_string())
5245 .collect(),
5246 });
5247
5248 Ok(Output {
5249 status: ExitStatus::default(),
5250 stdout: vec![],
5251 stderr: vec![],
5252 })
5253 }
5254 }
5255
5256 fn fake_http_client() -> Arc<dyn HttpClient> {
5257 FakeHttpClient::create(|request| async move {
5258 let (parts, _body) = request.into_parts();
5259 if parts.uri.path() == "/token" {
5260 let token_response = TokenResponse {
5261 token: "token".to_string(),
5262 };
5263 return Ok(http::Response::builder()
5264 .status(200)
5265 .body(http_client::AsyncBody::from(
5266 serde_json_lenient::to_string(&token_response).unwrap(),
5267 ))
5268 .unwrap());
5269 }
5270
5271 // OCI specific things
5272 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5273 let response = r#"
5274 {
5275 "schemaVersion": 2,
5276 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5277 "config": {
5278 "mediaType": "application/vnd.devcontainers",
5279 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5280 "size": 2
5281 },
5282 "layers": [
5283 {
5284 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5285 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5286 "size": 59392,
5287 "annotations": {
5288 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5289 }
5290 }
5291 ],
5292 "annotations": {
5293 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5294 "com.github.package.type": "devcontainer_feature"
5295 }
5296 }
5297 "#;
5298 return Ok(http::Response::builder()
5299 .status(200)
5300 .body(http_client::AsyncBody::from(response))
5301 .unwrap());
5302 }
5303
5304 if parts.uri.path()
5305 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5306 {
5307 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5312 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5313 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5314 ```
5315 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5316 ```
5317 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5318
5319
5320 ## OS Support
5321
5322 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5323
5324 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5325
5326 `bash` is required to execute the `install.sh` script."#),
5327 ("./README.md", r#"
5328 # Docker (Docker-in-Docker) (docker-in-docker)
5329
5330 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5331
5332 ## Example Usage
5333
5334 ```json
5335 "features": {
5336 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5337 }
5338 ```
5339
5340 ## Options
5341
5342 | Options Id | Description | Type | Default Value |
5343 |-----|-----|-----|-----|
5344 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5345 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5346 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5347 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5348 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5349 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5350 | installDockerBuildx | Install Docker Buildx | boolean | true |
5351 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5352 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5353
5354 ## Customizations
5355
5356 ### VS Code Extensions
5357
5358 - `ms-azuretools.vscode-containers`
5359
5360 ## Limitations
5361
5362 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5363 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5364 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5365 ```
5366 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5367 ```
5368 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5369
5370
5371 ## OS Support
5372
5373 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5374
5375 `bash` is required to execute the `install.sh` script.
5376
5377
5378 ---
5379
5380 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
5381 ("./devcontainer-feature.json", r#"
5382 {
5383 "id": "docker-in-docker",
5384 "version": "2.16.1",
5385 "name": "Docker (Docker-in-Docker)",
5386 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5387 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5388 "options": {
5389 "version": {
5390 "type": "string",
5391 "proposals": [
5392 "latest",
5393 "none",
5394 "20.10"
5395 ],
5396 "default": "latest",
5397 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5398 },
5399 "moby": {
5400 "type": "boolean",
5401 "default": true,
5402 "description": "Install OSS Moby build instead of Docker CE"
5403 },
5404 "mobyBuildxVersion": {
5405 "type": "string",
5406 "default": "latest",
5407 "description": "Install a specific version of moby-buildx when using Moby"
5408 },
5409 "dockerDashComposeVersion": {
5410 "type": "string",
5411 "enum": [
5412 "none",
5413 "v1",
5414 "v2"
5415 ],
5416 "default": "v2",
5417 "description": "Default version of Docker Compose (v1, v2 or none)"
5418 },
5419 "azureDnsAutoDetection": {
5420 "type": "boolean",
5421 "default": true,
5422 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5423 },
5424 "dockerDefaultAddressPool": {
5425 "type": "string",
5426 "default": "",
5427 "proposals": [],
5428 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5429 },
5430 "installDockerBuildx": {
5431 "type": "boolean",
5432 "default": true,
5433 "description": "Install Docker Buildx"
5434 },
5435 "installDockerComposeSwitch": {
5436 "type": "boolean",
5437 "default": false,
5438 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5439 },
5440 "disableIp6tables": {
5441 "type": "boolean",
5442 "default": false,
5443 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5444 }
5445 },
5446 "entrypoint": "/usr/local/share/docker-init.sh",
5447 "privileged": true,
5448 "containerEnv": {
5449 "DOCKER_BUILDKIT": "1"
5450 },
5451 "customizations": {
5452 "vscode": {
5453 "extensions": [
5454 "ms-azuretools.vscode-containers"
5455 ],
5456 "settings": {
5457 "github.copilot.chat.codeGeneration.instructions": [
5458 {
5459 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5460 }
5461 ]
5462 }
5463 }
5464 },
5465 "mounts": [
5466 {
5467 "source": "dind-var-lib-docker-${devcontainerId}",
5468 "target": "/var/lib/docker",
5469 "type": "volume"
5470 }
5471 ],
5472 "installsAfter": [
5473 "ghcr.io/devcontainers/features/common-utils"
5474 ]
5475 }"#),
5476 ("./install.sh", r#"
5477 #!/usr/bin/env bash
5478 #-------------------------------------------------------------------------------------------------------------
5479 # Copyright (c) Microsoft Corporation. All rights reserved.
5480 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5481 #-------------------------------------------------------------------------------------------------------------
5482 #
5483 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5484 # Maintainer: The Dev Container spec maintainers
5485
5486
5487 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5488 USE_MOBY="${MOBY:-"true"}"
5489 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5490 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5491 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5492 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5493 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5494 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5495 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5496 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5497 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5498 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5499 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5500 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5501
5502 # Default: Exit on any failure.
5503 set -e
5504
5505 # Clean up
5506 rm -rf /var/lib/apt/lists/*
5507
5508 # Setup STDERR.
5509 err() {
5510 echo "(!) $*" >&2
5511 }
5512
5513 if [ "$(id -u)" -ne 0 ]; then
5514 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5515 exit 1
5516 fi
5517
5518 ###################
5519 # Helper Functions
5520 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5521 ###################
5522
5523 # Determine the appropriate non-root user
5524 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5525 USERNAME=""
5526 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5527 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5528 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5529 USERNAME=${CURRENT_USER}
5530 break
5531 fi
5532 done
5533 if [ "${USERNAME}" = "" ]; then
5534 USERNAME=root
5535 fi
5536 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5537 USERNAME=root
5538 fi
5539
5540 # Package manager update function
5541 pkg_mgr_update() {
5542 case ${ADJUSTED_ID} in
5543 debian)
5544 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5545 echo "Running apt-get update..."
5546 apt-get update -y
5547 fi
5548 ;;
5549 rhel)
5550 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5551 cache_check_dir="/var/cache/yum"
5552 else
5553 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5554 fi
5555 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5556 echo "Running ${PKG_MGR_CMD} makecache ..."
5557 ${PKG_MGR_CMD} makecache
5558 fi
5559 ;;
5560 esac
5561 }
5562
5563 # Checks if packages are installed and installs them if not
5564 check_packages() {
5565 case ${ADJUSTED_ID} in
5566 debian)
5567 if ! dpkg -s "$@" > /dev/null 2>&1; then
5568 pkg_mgr_update
5569 apt-get -y install --no-install-recommends "$@"
5570 fi
5571 ;;
5572 rhel)
5573 if ! rpm -q "$@" > /dev/null 2>&1; then
5574 pkg_mgr_update
5575 ${PKG_MGR_CMD} -y install "$@"
5576 fi
5577 ;;
5578 esac
5579 }
5580
5581 # Figure out correct version of a three part version number is not passed
5582 find_version_from_git_tags() {
5583 local variable_name=$1
5584 local requested_version=${!variable_name}
5585 if [ "${requested_version}" = "none" ]; then return; fi
5586 local repository=$2
5587 local prefix=${3:-"tags/v"}
5588 local separator=${4:-"."}
5589 local last_part_optional=${5:-"false"}
5590 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5591 local escaped_separator=${separator//./\\.}
5592 local last_part
5593 if [ "${last_part_optional}" = "true" ]; then
5594 last_part="(${escaped_separator}[0-9]+)?"
5595 else
5596 last_part="${escaped_separator}[0-9]+"
5597 fi
5598 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5599 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5600 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5601 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5602 else
5603 set +e
5604 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5605 set -e
5606 fi
5607 fi
5608 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5609 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5610 exit 1
5611 fi
5612 echo "${variable_name}=${!variable_name}"
5613 }
5614
5615 # Use semver logic to decrement a version number then look for the closest match
5616 find_prev_version_from_git_tags() {
5617 local variable_name=$1
5618 local current_version=${!variable_name}
5619 local repository=$2
5620 # Normally a "v" is used before the version number, but support alternate cases
5621 local prefix=${3:-"tags/v"}
5622 # Some repositories use "_" instead of "." for version number part separation, support that
5623 local separator=${4:-"."}
5624 # Some tools release versions that omit the last digit (e.g. go)
5625 local last_part_optional=${5:-"false"}
5626 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5627 local version_suffix_regex=$6
5628 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5629 set +e
5630 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5631 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5632 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5633
5634 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5635 ((major=major-1))
5636 declare -g ${variable_name}="${major}"
5637 # Look for latest version from previous major release
5638 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5639 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5640 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5641 ((minor=minor-1))
5642 declare -g ${variable_name}="${major}.${minor}"
5643 # Look for latest version from previous minor release
5644 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5645 else
5646 ((breakfix=breakfix-1))
5647 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5648 declare -g ${variable_name}="${major}.${minor}"
5649 else
5650 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5651 fi
5652 fi
5653 set -e
5654 }
5655
# Function to fetch the version released prior to the latest version.
# $1 = github.com project URL
# $2 = GitHub API releases URL for the same project
# $3 = NAME of the shell variable to update (read via ${!name}, written via declare -g)
get_previous_version() {
    local url=$1
    local repo_url=$2
    local variable_name=$3
    prev_version=${!variable_name}

    output=$(curl -s "$repo_url");
    # A JSON object payload is an API error response, not a release list.
    if echo "$output" | jq -e 'type == "object"' > /dev/null; then
        message=$(echo "$output" | jq -r '.message')

        if [[ $message == "API rate limit exceeded"* ]]; then
            echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
            echo -e "\nAttempting to find latest version using GitHub tags."
            find_prev_version_from_git_tags prev_version "$url" "tags/v"
            declare -g ${variable_name}="${prev_version}"
        fi
        # NOTE(review): error messages other than rate-limit fall through and
        # leave the variable unchanged — confirm that is intentional.
    elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
        echo -e "\nAttempting to find latest version using GitHub Api."
        # .[1] is the second-newest entry in the releases list, i.e. the previous release.
        version=$(echo "$output" | jq -r '.[1].tag_name')
        declare -g ${variable_name}="${version#v}"
    fi
    echo "${variable_name}=${!variable_name}"
}
5680
# Translate a github.com project URL into its REST API "releases" endpoint,
# e.g. https://github.com/docker/compose -> https://api.github.com/repos/docker/compose/releases
get_github_api_repo_url() {
    local project_url="$1"
    # Swap the web host for the API host + /repos prefix, then append the releases path.
    local api_base="${project_url/https:\/\/github.com/https:\/\/api.github.com\/repos}"
    echo "${api_base}/releases"
}
5685
5686 ###########################################
5687 # Start docker-in-docker installation
5688 ###########################################
5689
5690 # Ensure apt is in non-interactive to avoid prompts
5691 export DEBIAN_FRONTEND=noninteractive
5692
5693 # Source /etc/os-release to get OS info
5694 . /etc/os-release
5695
5696 # Determine adjusted ID and package manager
5697 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5698 ADJUSTED_ID="debian"
5699 PKG_MGR_CMD="apt-get"
5700 # Use dpkg for Debian-based systems
5701 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5702 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5703 ADJUSTED_ID="rhel"
5704 # Determine the appropriate package manager for RHEL-based systems
5705 for pkg_mgr in tdnf dnf microdnf yum; do
5706 if command -v "$pkg_mgr" >/dev/null 2>&1; then
5707 PKG_MGR_CMD="$pkg_mgr"
5708 break
5709 fi
5710 done
5711
5712 if [ -z "${PKG_MGR_CMD}" ]; then
5713 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5714 exit 1
5715 fi
5716
5717 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5718 else
5719 err "Linux distro ${ID} not supported."
5720 exit 1
5721 fi
5722
5723 # Azure Linux specific setup
5724 if [ "${ID}" = "azurelinux" ]; then
5725 VERSION_CODENAME="azurelinux${VERSION_ID}"
5726 fi
5727
5728 # Prevent attempting to install Moby on Debian trixie (packages removed)
5729 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5730 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5731 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5732 exit 1
5733 fi
5734
5735 # Check if distro is supported
5736 if [ "${USE_MOBY}" = "true" ]; then
5737 if [ "${ADJUSTED_ID}" = "debian" ]; then
5738 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5739 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5740 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5741 exit 1
5742 fi
5743 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5744 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5745 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5746 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5747 else
5748 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5749 fi
5750 fi
5751 else
5752 if [ "${ADJUSTED_ID}" = "debian" ]; then
5753 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5754 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5755 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5756 exit 1
5757 fi
5758 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5759 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5760
5761 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5762 fi
5763 fi
5764
# Install base dependencies
# NOTE(review): check_packages is a helper defined earlier in this script —
# presumably it installs only the packages that are missing; confirm there.
base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
case ${ADJUSTED_ID} in
    debian)
        check_packages apt-transport-https $base_packages dirmngr
        ;;
    rhel)
        check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel

        ;;
esac

# Install git if not already present
if ! command -v git >/dev/null 2>&1; then
    check_packages git
fi

# Update CA certificates to ensure HTTPS connections work properly
# This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
# Only run for Debian-based systems (RHEL uses update-ca-trust instead)
if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
    update-ca-certificates
fi

# Swap to legacy iptables for compatibility (Debian only)
if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
    update-alternatives --set iptables /usr/sbin/iptables-legacy
    update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
5794
# Set up the necessary repositories.
# Moby path: Microsoft package repos. Docker CE path: download.docker.com repos.
if [ "${USE_MOBY}" = "true" ]; then
    # Name of open source engine/cli
    engine_package_name="moby-engine"
    cli_package_name="moby-cli"

    case ${ADJUSTED_ID} in
        debian)
            # Import key safely and import Microsoft apt repo
            # (both the distro-pinned and rolling keys are concatenated into one keyring)
            {
                curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
            } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
            echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
            ;;
        rhel)
            echo "(*) ${ID} detected - checking for Moby packages..."

            # Check if moby packages are available in default repos
            if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                echo "(*) Using built-in ${ID} Moby packages"
            else
                case "${ID}" in
                    azurelinux)
                        echo "(*) Moby packages not found in Azure Linux repositories"
                        echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                        err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                        exit 1
                        ;;
                    mariner)
                        echo "(*) Adding Microsoft repository for CBL-Mariner..."
                        # Add Microsoft repository if packages aren't available locally
                        curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                        # Unindented heredoc: the terminator must stay at column 0.
                        cat > /etc/yum.repos.d/microsoft.repo << EOF
[microsoft]
name=Microsoft Repository
baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
EOF
                        # Verify packages are available after adding repo
                        pkg_mgr_update
                        if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                            echo "(*) Moby packages not found in Microsoft repository either"
                            err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                            exit 1
                        fi
                        ;;
                    *)
                        err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                        exit 1
                        ;;
                esac
            fi
            ;;
    esac
else
    # Name of licensed engine/cli
    engine_package_name="docker-ce"
    cli_package_name="docker-ce-cli"
    case ${ADJUSTED_ID} in
        debian)
            curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
            ;;
        rhel)
            # Docker CE repository setup for RHEL-based systems.
            # Helper: write a docker-ce repo file by hand (used when no
            # config-manager tool exists, and always on Azure Linux / Mariner).
            setup_docker_ce_repo() {
                curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
            }
            # Helper: best-effort install of Docker CE runtime dependencies on
            # Azure Linux / Mariner; failures are logged and ignored.
            install_azure_linux_deps() {
                echo "(*) Installing device-mapper libraries for Docker CE..."
                [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                echo "(*) Installing additional Docker CE dependencies..."
                ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                    echo "(*) Some optional dependencies could not be installed, continuing..."
                }
            }
            # Helper: add a minimal SELinux file context for /var/lib/docker
            # when SELinux is enforcing/permissive (container-selinux is absent here).
            setup_selinux_context() {
                if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                    echo "(*) Creating minimal SELinux context for Docker compatibility..."
                    mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                    echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                fi
            }

            # Special handling for RHEL Docker CE installation
            case "${ID}" in
                azurelinux|mariner)
                    echo "(*) ${ID} detected"
                    echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                    echo "(*) Setting up Docker CE repository..."

                    setup_docker_ce_repo
                    install_azure_linux_deps

                    if [ "${USE_MOBY}" != "true" ]; then
                        echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                        echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                        setup_selinux_context
                    else
                        echo "(*) Using Moby - container-selinux not required"
                    fi
                    ;;
                *)
                    # Standard RHEL/CentOS/Fedora approach
                    if command -v dnf >/dev/null 2>&1; then
                        dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    elif command -v yum-config-manager >/dev/null 2>&1; then
                        yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    else
                        # Manual fallback
                        setup_docker_ce_repo
                    fi
                    ;;
            esac
            ;;
    esac
fi
5927
# Refresh package database so the newly-added repositories are visible.
if [ "${ADJUSTED_ID}" = "debian" ]; then
    apt-get update
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    pkg_mgr_update
fi
5937
# Soft version matching: resolve DOCKER_VERSION to concrete package version
# suffixes. On debian the suffix form is "=<version>"; on rhel it is "-<version>".
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            # A bare "=" means grep matched nothing inside the command substitution.
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
            if [ "${USE_MOBY}" = "true" ]; then
                available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            else
                available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            fi
            set -e
            if [ -n "${available_versions}" ]; then
                # Both suffixes are derived from the same query, so they are always equal here.
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi
5982
# Version matching for moby-buildx (only relevant on the Moby path; Docker CE
# gets buildx via its own plugin packaging).
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Same escaping/regex approach as the engine/cli version match above.
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
6020
# Install Docker / Moby CLI and Engine if not already installed.
# Version suffixes: engine_version_suffix pins the engine package
# (moby-engine / docker-ce), cli_version_suffix pins the CLI package
# (moby-cli / docker-ce-cli). They were previously swapped on the Docker CE
# lines below — harmless only because both are always set to the same value.
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Hold the pinned packages so unattended upgrades don't move them,
                # then install compose.
                apt-mark hold docker-ce docker-ce-cli
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    # Use rpm with --force and --nodeps for Azure Linux
                    set +e # Don't exit on error for this section
                    # FIX: engine suffix on docker-ce, cli suffix on docker-ce-cli (was swapped).
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        # FIX: this message was printed twice.
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Get the repository baseurl
                        # NOTE(review): hard-coded to x86_64/el9 — aarch64 hosts would need a
                        # different baseurl and file names; confirm whether that path is reachable.
                        repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Get package names with versions (the rhel suffixes carry a leading '-').
                        # FIX: read the engine suffix for the engine package (was cli suffix),
                        # and drop the unused docker_cli_version variable.
                        if [ -n "${engine_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems
                    # FIX: engine suffix on docker-ce, cli suffix on docker-ce-cli (was swapped).
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
6151
echo "Finished installing docker / moby!"

# CLI plugin directory shared by the compose and buildx install steps below.
docker_home="/usr/libexec/docker"
cli_plugins_dir="${docker_home}/cli-plugins"
6156
# fallback for docker-compose: retry the download with the release that
# precedes the latest one. Reads/writes the global compose_version and uses
# the globals target_compose_arch and docker_compose_path set by the caller.
fallback_compose(){
    local url="$1"
    local repo_url
    repo_url="$(get_github_api_repo_url "$url")"
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    get_previous_version "${url}" "${repo_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    local asset_url="https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}"
    curl -fsSL "${asset_url}" -o ${docker_compose_path}
}
6166
# If 'docker-compose' command is to be included
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the package architecture onto compose release asset naming.
    case "${architecture}" in
        amd64|x86_64) target_compose_arch=x86_64 ;;
        arm64|aarch64) target_compose_arch=aarch64 ;;
        *)
            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum and verify the binary.
            # NOTE(review): the .sha256sum file is written to the current working
            # directory and never removed — confirm that is acceptable.
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        # v2: resolve the requested version against git tags, then download the release binary.
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
            echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
            fallback_compose "$docker_compose_url"
        }

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum and verify the binary.
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a CLI plugin so 'docker compose' works.
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6222
# fallback method for compose-switch: retry with the release that precedes
# the latest. Reads/writes the global compose_switch_version and relies on
# target_switch_arch being set by the caller.
fallback_compose-switch() {
    local url="$1"
    local repo_url
    repo_url="$(get_github_api_repo_url "$url")"
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    get_previous_version "$url" "$repo_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    local asset_url="https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}"
    curl -fsSL "${asset_url}" -o /usr/local/bin/compose-switch
}
# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
    if type docker-compose > /dev/null 2>&1; then
        echo "(*) Installing compose-switch..."
        current_compose_path="$(command -v docker-compose)"
        target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
        compose_switch_version="latest"
        compose_switch_url="https://github.com/docker/compose-switch"

        # FIX: map the architecture BEFORE any download can be attempted —
        # fallback_compose-switch interpolates ${target_switch_arch}, which was
        # previously still unset when the version lookup below failed.
        case "${architecture}" in
            amd64|x86_64) target_switch_arch=amd64 ;;
            arm64|aarch64) target_switch_arch=arm64 ;;
            *) target_switch_arch=${architecture} ;;
        esac

        # Try to get latest version, fallback to known stable version if GitHub API fails
        set +e
        find_version_from_git_tags compose_switch_version "$compose_switch_url"
        if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
            echo "(*) GitHub API rate limited or failed, using fallback method"
            fallback_compose-switch "$compose_switch_url"
        fi
        set -e

        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
        chmod +x /usr/local/bin/compose-switch
        # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
        # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
        # NOTE(review): docker_compose_path is only assigned in the compose-install
        # section above; confirm it is always set when this branch runs.
        mv "${current_compose_path}" "${target_compose_path}"
        update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
        update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
    else
        err "Skipping installation of compose-switch as docker compose is unavailable..."
    fi
fi
6266
# If the generated init script already exists, there is nothing left to do.
if [ -f "/usr/local/share/docker-init.sh" ]; then
    echo "/usr/local/share/docker-init.sh already exists, so exiting."
    # Clean up apt metadata to keep the image small.
    rm -rf /var/lib/apt/lists/*
    exit 0
fi
echo "docker-init doesn't exist, adding..."

# Ensure the docker group exists before granting the user access to the socket.
# FIX: grep reads /etc/group directly (previously a useless 'cat | grep').
if ! grep -e "^docker:" /etc/group > /dev/null 2>&1; then
    groupadd -r docker
fi

usermod -aG docker ${USERNAME}
6281
# fallback for docker/buildx: retry the download with the release prior to
# latest. Reads/writes the global buildx_version; buildx_file_name stays
# global because the caller moves that file into place afterwards.
fallback_buildx() {
    local url="$1"
    local repo_url
    repo_url="$(get_github_api_repo_url "$url")"
    echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
    get_previous_version "$url" "$repo_url" buildx_version
    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
    echo -e "\nAttempting to install v${buildx_version}"
    wget "https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}"
}
6292
if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
    buildx_version="latest"
    docker_buildx_url="https://github.com/docker/buildx"
    find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
    echo "(*) Installing buildx ${buildx_version}..."

    # Map architecture for buildx downloads
    case "${architecture}" in
        amd64|x86_64) target_buildx_arch=amd64 ;;
        arm64|aarch64) target_buildx_arch=arm64 ;;
        *) target_buildx_arch=${architecture} ;;
    esac

    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

    cd /tmp
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

    # (Re)define the plugin directory; also assigned earlier for the compose copy.
    docker_home="/usr/libexec/docker"
    cli_plugins_dir="${docker_home}/cli-plugins"

    mkdir -p ${cli_plugins_dir}
    mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
    chmod +x ${cli_plugins_dir}/docker-buildx

    # Group-own the plugin tree by 'docker' so non-root members can use the plugins;
    # setgid on directories makes new files inherit the group.
    chown -R "${USERNAME}:docker" "${docker_home}"
    chmod -R g+r+w "${docker_home}"
    find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
fi
6322
# Decide whether to pass --ip6tables=false to dockerd. The flag is only
# emitted when the requested engine version is "latest" or major version >= 27.
DOCKER_DEFAULT_IP6_TABLES=""
if [ "$DISABLE_IP6_TABLES" == true ]; then
    requested_version=""
    # checking whether the version requested either is in semver format or just a number denoting the major version
    # and, extracting the major version number out of the two scenarios
    semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
    # FIX: quote the regex and version expansions so they are passed to grep
    # as single words regardless of their content.
    if echo "$DOCKER_VERSION" | grep -Eq "$semver_regex"; then
        requested_version=$(echo "$DOCKER_VERSION" | cut -d. -f1)
    elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
        requested_version="$DOCKER_VERSION"
    fi
    if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
        DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
        echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
    fi
fi

# mkdir -p is a no-op when the directory exists, so no pre-check is needed.
mkdir -p /usr/local/share
6343
# Generate the header of /usr/local/share/docker-init.sh. This heredoc is
# UNQUOTED, so the ${...} values below are expanded NOW (at install time) and
# written into the generated script as literals. The heredoc body must not be
# altered — it is the file's runtime content.
tee /usr/local/share/docker-init.sh > /dev/null \
<< EOF
#!/bin/sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------

set -e

AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
EOF
6358
6359 tee -a /usr/local/share/docker-init.sh > /dev/null \
6360 << 'EOF'
6361 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6362 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6363 find /run /var/run -iname 'docker*.pid' -delete || :
6364 find /run /var/run -iname 'container*.pid' -delete || :
6365
6366 # -- Start: dind wrapper script --
6367 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6368
6369 export container=docker
6370
6371 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6372 mount -t securityfs none /sys/kernel/security || {
6373 echo >&2 'Could not mount /sys/kernel/security.'
6374 echo >&2 'AppArmor detection and --privileged mode might break.'
6375 }
6376 fi
6377
6378 # Mount /tmp (conditionally)
6379 if ! mountpoint -q /tmp; then
6380 mount -t tmpfs none /tmp
6381 fi
6382
6383 set_cgroup_nesting()
6384 {
6385 # cgroup v2: enable nesting
6386 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6387 # move the processes from the root group to the /init group,
6388 # otherwise writing subtree_control fails with EBUSY.
6389 # An error during moving non-existent process (i.e., "cat") is ignored.
6390 mkdir -p /sys/fs/cgroup/init
6391 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6392 # enable controllers
6393 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6394 > /sys/fs/cgroup/cgroup.subtree_control
6395 fi
6396 }
6397
6398 # Set cgroup nesting, retrying if necessary
6399 retry_cgroup_nesting=0
6400
6401 until [ "${retry_cgroup_nesting}" -eq "5" ];
6402 do
6403 set +e
6404 set_cgroup_nesting
6405
6406 if [ $? -ne 0 ]; then
6407 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6408 else
6409 break
6410 fi
6411
6412 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6413 set -e
6414 done
6415
6416 # -- End: dind wrapper script --
6417
6418 # Handle DNS
6419 set +e
6420 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6421 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6422 then
6423 echo "Setting dockerd Azure DNS."
6424 CUSTOMDNS="--dns 168.63.129.16"
6425 else
6426 echo "Not setting dockerd DNS manually."
6427 CUSTOMDNS=""
6428 fi
6429 set -e
6430
6431 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6432 then
6433 DEFAULT_ADDRESS_POOL=""
6434 else
6435 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6436 fi
6437
6438 # Start docker/moby engine
6439 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6440 INNEREOF
6441 )"
6442
6443 sudo_if() {
6444 COMMAND="$*"
6445
6446 if [ "$(id -u)" -ne 0 ]; then
6447 sudo $COMMAND
6448 else
6449 $COMMAND
6450 fi
6451 }
6452
6453 retry_docker_start_count=0
6454 docker_ok="false"
6455
6456 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6457 do
6458 # Start using sudo if not invoked as root
6459 if [ "$(id -u)" -ne 0 ]; then
6460 sudo /bin/sh -c "${dockerd_start}"
6461 else
6462 eval "${dockerd_start}"
6463 fi
6464
6465 retry_count=0
6466 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6467 do
6468 sleep 1s
6469 set +e
6470 docker info > /dev/null 2>&1 && docker_ok="true"
6471 set -e
6472
6473 retry_count=`expr $retry_count + 1`
6474 done
6475
6476 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6477 echo "(*) Failed to start docker, retrying..."
6478 set +e
6479 sudo_if pkill dockerd
6480 sudo_if pkill containerd
6481 set -e
6482 fi
6483
6484 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6485 done
6486
6487 # Execute whatever commands were passed in (if any). This allows us
6488 # to set this script to ENTRYPOINT while still executing the default CMD.
6489 exec "$@"
6490 EOF
6491
6492 chmod +x /usr/local/share/docker-init.sh
6493 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6494
6495 # Clean up
6496 rm -rf /var/lib/apt/lists/*
6497
6498 echo 'docker-in-docker-debian script has completed!'"#),
6499 ]).await;
6500
6501 return Ok(http::Response::builder()
6502 .status(200)
6503 .body(AsyncBody::from(response))
6504 .unwrap());
6505 }
6506 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6507 let response = r#"
6508 {
6509 "schemaVersion": 2,
6510 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6511 "config": {
6512 "mediaType": "application/vnd.devcontainers",
6513 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6514 "size": 2
6515 },
6516 "layers": [
6517 {
6518 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6519 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6520 "size": 20992,
6521 "annotations": {
6522 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6523 }
6524 }
6525 ],
6526 "annotations": {
6527 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6528 "com.github.package.type": "devcontainer_feature"
6529 }
6530 }
6531 "#;
6532
6533 return Ok(http::Response::builder()
6534 .status(200)
6535 .body(http_client::AsyncBody::from(response))
6536 .unwrap());
6537 }
                    // Mock OCI registry route: content-addressed blob download for the
                    // `go` feature's single tar layer. The digest in the path matches the
                    // layer digest advertised by the `/manifests/1` route served above.
                    if parts.uri.path()
                        == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
                    {
                        // Assemble an in-memory feature tarball: the feature metadata
                        // (devcontainer-feature.json) plus its install.sh entry point,
                        // mirroring the published ghcr.io/devcontainers/features/go layout.
                        let response = build_tarball(vec![
                            ("./devcontainer-feature.json", r#"
 {
     "id": "go",
     "version": "1.3.3",
     "name": "Go",
     "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
     "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
     "options": {
         "version": {
             "type": "string",
             "proposals": [
                 "latest",
                 "none",
                 "1.24",
                 "1.23"
             ],
             "default": "latest",
             "description": "Select or enter a Go version to install"
         },
         "golangciLintVersion": {
             "type": "string",
             "default": "latest",
             "description": "Version of golangci-lint to install"
         }
     },
     "init": true,
     "customizations": {
         "vscode": {
             "extensions": [
                 "golang.Go"
             ],
             "settings": {
                 "github.copilot.chat.codeGeneration.instructions": [
                     {
                         "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                     }
                 ]
             }
         }
     },
     "containerEnv": {
         "GOROOT": "/usr/local/go",
         "GOPATH": "/go",
         "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
     },
     "capAdd": [
         "SYS_PTRACE"
     ],
     "securityOpt": [
         "seccomp=unconfined"
     ],
     "installsAfter": [
         "ghcr.io/devcontainers/features/common-utils"
     ]
 }
 "#),
                            ("./install.sh", r#"
 #!/usr/bin/env bash
 #-------------------------------------------------------------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
 #-------------------------------------------------------------------------------------------------------------
 #
 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
 # Maintainer: The VS Code and Codespaces Teams

 TARGET_GO_VERSION="${VERSION:-"latest"}"
 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

 # https://www.google.com/linuxrepositories/
 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

 set -e

 if [ "$(id -u)" -ne 0 ]; then
     echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
     exit 1
 fi

 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
 . /etc/os-release
 # Get an adjusted ID independent of distro variants
 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
     ADJUSTED_ID="debian"
 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
     ADJUSTED_ID="rhel"
     if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
         VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
     else
         VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
     fi
 else
     echo "Linux distro ${ID} not supported."
     exit 1
 fi

 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
     # As of 1 July 2024, mirrorlist.centos.org no longer exists.
     # Update the repo files to reference vault.centos.org.
     sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
     sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
     sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
 fi

 # Setup INSTALL_CMD & PKG_MGR_CMD
 if type apt-get > /dev/null 2>&1; then
     PKG_MGR_CMD=apt-get
     INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
 elif type microdnf > /dev/null 2>&1; then
     PKG_MGR_CMD=microdnf
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
 elif type dnf > /dev/null 2>&1; then
     PKG_MGR_CMD=dnf
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
 else
     PKG_MGR_CMD=yum
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
 fi

 # Clean up
 clean_up() {
     case ${ADJUSTED_ID} in
         debian)
             rm -rf /var/lib/apt/lists/*
             ;;
         rhel)
             rm -rf /var/cache/dnf/* /var/cache/yum/*
             rm -rf /tmp/yum.log
             rm -rf ${GPG_INSTALL_PATH}
             ;;
     esac
 }
 clean_up


 # Figure out correct version of a three part version number is not passed
 find_version_from_git_tags() {
     local variable_name=$1
     local requested_version=${!variable_name}
     if [ "${requested_version}" = "none" ]; then return; fi
     local repository=$2
     local prefix=${3:-"tags/v"}
     local separator=${4:-"."}
     local last_part_optional=${5:-"false"}
     if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
         local escaped_separator=${separator//./\\.}
         local last_part
         if [ "${last_part_optional}" = "true" ]; then
             last_part="(${escaped_separator}[0-9]+)?"
         else
             last_part="${escaped_separator}[0-9]+"
         fi
         local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
         local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
         if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
             declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
         else
             set +e
             declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
             set -e
         fi
     fi
     if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
         echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
         exit 1
     fi
     echo "${variable_name}=${!variable_name}"
 }

 pkg_mgr_update() {
     case $ADJUSTED_ID in
         debian)
             if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                 echo "Running apt-get update..."
                 ${PKG_MGR_CMD} update -y
             fi
             ;;
         rhel)
             if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                     echo "Running ${PKG_MGR_CMD} makecache ..."
                     ${PKG_MGR_CMD} makecache
                 fi
             else
                 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                     echo "Running ${PKG_MGR_CMD} check-update ..."
                     set +e
                     ${PKG_MGR_CMD} check-update
                     rc=$?
                     if [ $rc != 0 ] && [ $rc != 100 ]; then
                         exit 1
                     fi
                     set -e
                 fi
             fi
             ;;
     esac
 }

 # Checks if packages are installed and installs them if not
 check_packages() {
     case ${ADJUSTED_ID} in
         debian)
             if ! dpkg -s "$@" > /dev/null 2>&1; then
                 pkg_mgr_update
                 ${INSTALL_CMD} "$@"
             fi
             ;;
         rhel)
             if ! rpm -q "$@" > /dev/null 2>&1; then
                 pkg_mgr_update
                 ${INSTALL_CMD} "$@"
             fi
             ;;
     esac
 }

 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
 rm -f /etc/profile.d/00-restore-env.sh
 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
 chmod +x /etc/profile.d/00-restore-env.sh

 # Some distributions do not install awk by default (e.g. Mariner)
 if ! type awk >/dev/null 2>&1; then
     check_packages awk
 fi

 # Determine the appropriate non-root user
 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
     USERNAME=""
     POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
     for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
         if id -u ${CURRENT_USER} > /dev/null 2>&1; then
             USERNAME=${CURRENT_USER}
             break
         fi
     done
     if [ "${USERNAME}" = "" ]; then
         USERNAME=root
     fi
 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
     USERNAME=root
 fi

 export DEBIAN_FRONTEND=noninteractive

 check_packages ca-certificates gnupg2 tar gcc make pkg-config

 if [ $ADJUSTED_ID = "debian" ]; then
     check_packages g++ libc6-dev
 else
     check_packages gcc-c++ glibc-devel
 fi
 # Install curl, git, other dependencies if missing
 if ! type curl > /dev/null 2>&1; then
     check_packages curl
 fi
 if ! type git > /dev/null 2>&1; then
     check_packages git
 fi
 # Some systems, e.g. Mariner, still a few more packages
 if ! type as > /dev/null 2>&1; then
     check_packages binutils
 fi
 if ! [ -f /usr/include/linux/errno.h ]; then
     check_packages kernel-headers
 fi
 # Minimal RHEL install may need findutils installed
 if ! [ -f /usr/bin/find ]; then
     check_packages findutils
 fi

 # Get closest match for version number specified
 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

 architecture="$(uname -m)"
 case $architecture in
     x86_64) architecture="amd64";;
     aarch64 | armv8*) architecture="arm64";;
     aarch32 | armv7* | armvhf*) architecture="armv6l";;
     i?86) architecture="386";;
     *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
 esac

 # Install Go
 umask 0002
 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
     groupadd -r golang
 fi
 usermod -a -G golang "${USERNAME}"
 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
     # Use a temporary location for gpg keys to avoid polluting image
     export GNUPGHOME="/tmp/tmp-gnupg"
     mkdir -p ${GNUPGHOME}
     chmod 700 ${GNUPGHOME}
     curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
     gpg -q --import /tmp/tmp-gnupg/golang_key
     echo "Downloading Go ${TARGET_GO_VERSION}..."
     set +e
     curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
     exit_code=$?
     set -e
     if [ "$exit_code" != "0" ]; then
         echo "(!) Download failed."
         # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
         set +e
         major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
         minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
         breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
         # Handle Go's odd version pattern where "0" releases omit the last part
         if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
             ((minor=minor-1))
             TARGET_GO_VERSION="${major}.${minor}"
             # Look for latest version from previous minor release
             find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
         else
             ((breakfix=breakfix-1))
             if [ "${breakfix}" = "0" ]; then
                 TARGET_GO_VERSION="${major}.${minor}"
             else
                 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
             fi
         fi
         set -e
         echo "Trying ${TARGET_GO_VERSION}..."
         curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
     fi
     curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
     gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
     echo "Extracting Go ${TARGET_GO_VERSION}..."
     tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
     rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
 else
     echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
 fi

 # Install Go tools that are isImportant && !replacedByGopls based on
 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
 GO_TOOLS="\
     golang.org/x/tools/gopls@latest \
     honnef.co/go/tools/cmd/staticcheck@latest \
     golang.org/x/lint/golint@latest \
     github.com/mgechev/revive@latest \
     github.com/go-delve/delve/cmd/dlv@latest \
     github.com/fatih/gomodifytags@latest \
     github.com/haya14busa/goplay/cmd/goplay@latest \
     github.com/cweill/gotests/gotests@latest \
     github.com/josharian/impl@latest"

 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
     echo "Installing common Go tools..."
     export PATH=${TARGET_GOROOT}/bin:${PATH}
     export GOPATH=/tmp/gotools
     export GOCACHE="${GOPATH}/cache"

     mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
     cd "${GOPATH}"

     # Use go get for versions of go under 1.16
     go_install_command=install
     if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
         export GO111MODULE=on
         go_install_command=get
         echo "Go version < 1.16, using go get."
     fi

     (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

     # Move Go tools into path
     if [ -d "${GOPATH}/bin" ]; then
         mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
     fi

     # Install golangci-lint from precompiled binaries
     if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
         echo "Installing golangci-lint latest..."
         curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
             sh -s -- -b "${TARGET_GOPATH}/bin"
     else
         echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
         curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
             sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
     fi

     # Remove Go tools temp directory
     rm -rf "${GOPATH}"
 fi


 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

 # Clean up
 clean_up

 echo "Done!"
 "#),
                        ])
                        .await;
                        return Ok(http::Response::builder()
                            .status(200)
                            .body(AsyncBody::from(response))
                            .unwrap());
                    }
                    // Mock OCI registry route: image manifest for the `aws-cli`
                    // dev-container feature at tag `1`. The layer digest below is served
                    // by the matching `/blobs/sha256:4e9b04…` route, and the
                    // `dev.containers.metadata` annotation embeds the feature's
                    // devcontainer-feature.json as a JSON string.
                    if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                        let response = r#"
                {
                    "schemaVersion": 2,
                    "mediaType": "application/vnd.oci.image.manifest.v1+json",
                    "config": {
                        "mediaType": "application/vnd.devcontainers",
                        "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                        "size": 2
                    },
                    "layers": [
                        {
                            "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                            "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                            "size": 19968,
                            "annotations": {
                                "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                            }
                        }
                    ],
                    "annotations": {
                        "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                        "com.github.package.type": "devcontainer_feature"
                    }
                }"#;
                        return Ok(http::Response::builder()
                            .status(200)
                            .body(AsyncBody::from(response))
                            .unwrap());
                    }
6986 if parts.uri.path()
6987 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6988 {
6989 let response = build_tarball(vec![
6990 (
6991 "./devcontainer-feature.json",
6992 r#"
6993{
6994 "id": "aws-cli",
6995 "version": "1.1.3",
6996 "name": "AWS CLI",
6997 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6998 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6999 "options": {
7000 "version": {
7001 "type": "string",
7002 "proposals": [
7003 "latest"
7004 ],
7005 "default": "latest",
7006 "description": "Select or enter an AWS CLI version."
7007 },
7008 "verbose": {
7009 "type": "boolean",
7010 "default": true,
7011 "description": "Suppress verbose output."
7012 }
7013 },
7014 "customizations": {
7015 "vscode": {
7016 "extensions": [
7017 "AmazonWebServices.aws-toolkit-vscode"
7018 ],
7019 "settings": {
7020 "github.copilot.chat.codeGeneration.instructions": [
7021 {
7022 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
7023 }
7024 ]
7025 }
7026 }
7027 },
7028 "installsAfter": [
7029 "ghcr.io/devcontainers/features/common-utils"
7030 ]
7031}
7032 "#,
7033 ),
7034 (
7035 "./install.sh",
7036 r#"#!/usr/bin/env bash
7037 #-------------------------------------------------------------------------------------------------------------
7038 # Copyright (c) Microsoft Corporation. All rights reserved.
7039 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7040 #-------------------------------------------------------------------------------------------------------------
7041 #
7042 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
7043 # Maintainer: The VS Code and Codespaces Teams
7044
7045 set -e
7046
7047 # Clean up
7048 rm -rf /var/lib/apt/lists/*
7049
7050 VERSION=${VERSION:-"latest"}
7051 VERBOSE=${VERBOSE:-"true"}
7052
7053 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7054 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7055
7056 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7057 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7058 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7059 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7060 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7061 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7062 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7063 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7064 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7065 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7066 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7067 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7068 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7069 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7070 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7071 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7072 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7073 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7074 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7075 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7076 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7077 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7078 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7079 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7080 YLZATHZKTJyiqA==
7081 =vYOk
7082 -----END PGP PUBLIC KEY BLOCK-----"
7083
7084 if [ "$(id -u)" -ne 0 ]; then
7085 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7086 exit 1
7087 fi
7088
7089 apt_get_update()
7090 {
7091 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7092 echo "Running apt-get update..."
7093 apt-get update -y
7094 fi
7095 }
7096
7097 # Checks if packages are installed and installs them if not
7098 check_packages() {
7099 if ! dpkg -s "$@" > /dev/null 2>&1; then
7100 apt_get_update
7101 apt-get -y install --no-install-recommends "$@"
7102 fi
7103 }
7104
7105 export DEBIAN_FRONTEND=noninteractive
7106
7107 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7108
7109 verify_aws_cli_gpg_signature() {
7110 local filePath=$1
7111 local sigFilePath=$2
7112 local awsGpgKeyring=aws-cli-public-key.gpg
7113
7114 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7115 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7116 local status=$?
7117
7118 rm "./${awsGpgKeyring}"
7119
7120 return ${status}
7121 }
7122
7123 install() {
7124 local scriptZipFile=awscli.zip
7125 local scriptSigFile=awscli.sig
7126
7127 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7128 if [ "${VERSION}" != "latest" ]; then
7129 local versionStr=-${VERSION}
7130 fi
7131 architecture=$(dpkg --print-architecture)
7132 case "${architecture}" in
7133 amd64) architectureStr=x86_64 ;;
7134 arm64) architectureStr=aarch64 ;;
7135 *)
7136 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7137 exit 1
7138 esac
7139 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7140 curl "${scriptUrl}" -o "${scriptZipFile}"
7141 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7142
7143 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7144 if (( $? > 0 )); then
7145 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7146 exit 1
7147 fi
7148
7149 if [ "${VERBOSE}" = "false" ]; then
7150 unzip -q "${scriptZipFile}"
7151 else
7152 unzip "${scriptZipFile}"
7153 fi
7154
7155 ./aws/install
7156
7157 # kubectl bash completion
7158 mkdir -p /etc/bash_completion.d
7159 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7160
7161 # kubectl zsh completion
7162 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7163 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7164 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7165 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7166 fi
7167
7168 rm -rf ./aws
7169 }
7170
7171 echo "(*) Installing AWS CLI..."
7172
7173 install
7174
7175 # Clean up
7176 rm -rf /var/lib/apt/lists/*
7177
7178 echo "Done!""#,
7179 ),
7180 ("./scripts/", r#""#),
7181 (
7182 "./scripts/fetch-latest-completer-scripts.sh",
7183 r#"
7184 #!/bin/bash
7185 #-------------------------------------------------------------------------------------------------------------
7186 # Copyright (c) Microsoft Corporation. All rights reserved.
7187 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7188 #-------------------------------------------------------------------------------------------------------------
7189 #
7190 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7191 # Maintainer: The Dev Container spec maintainers
7192 #
7193 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7194 #
7195 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7196 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7197 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7198
7199 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7200 chmod +x "$BASH_COMPLETER_SCRIPT"
7201
7202 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7203 chmod +x "$ZSH_COMPLETER_SCRIPT"
7204 "#,
7205 ),
7206 ("./scripts/vendor/", r#""#),
7207 (
7208 "./scripts/vendor/aws_bash_completer",
7209 r#"
7210 # Typically that would be added under one of the following paths:
7211 # - /etc/bash_completion.d
7212 # - /usr/local/etc/bash_completion.d
7213 # - /usr/share/bash-completion/completions
7214
7215 complete -C aws_completer aws
7216 "#,
7217 ),
7218 (
7219 "./scripts/vendor/aws_zsh_completer.sh",
7220 r#"
7221 # Source this file to activate auto completion for zsh using the bash
7222 # compatibility helper. Make sure to run `compinit` before, which should be
7223 # given usually.
7224 #
7225 # % source /path/to/zsh_complete.sh
7226 #
7227 # Typically that would be called somewhere in your .zshrc.
7228 #
7229 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7230 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7231 #
7232 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7233 #
7234 # zsh releases prior to that version do not export the required env variables!
7235
7236 autoload -Uz bashcompinit
7237 bashcompinit -i
7238
7239 _bash_complete() {
7240 local ret=1
7241 local -a suf matches
7242 local -x COMP_POINT COMP_CWORD
7243 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7244 local -x COMP_LINE="$words"
7245 local -A savejobstates savejobtexts
7246
7247 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7248 (( COMP_CWORD = CURRENT - 1))
7249 COMP_WORDS=( $words )
7250 BASH_VERSINFO=( 2 05b 0 1 release )
7251
7252 savejobstates=( ${(kv)jobstates} )
7253 savejobtexts=( ${(kv)jobtexts} )
7254
7255 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7256
7257 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7258
7259 if [[ -n $matches ]]; then
7260 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7261 compset -P '*/' && matches=( ${matches##*/} )
7262 compset -S '/*' && matches=( ${matches%%/*} )
7263 compadd -Q -f "${suf[@]}" -a matches && ret=0
7264 else
7265 compadd -Q "${suf[@]}" -a matches && ret=0
7266 fi
7267 fi
7268
7269 if (( ret )); then
7270 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7271 _default "${suf[@]}" && ret=0
7272 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7273 _directories "${suf[@]}" && ret=0
7274 fi
7275 fi
7276
7277 return ret
7278 }
7279
7280 complete -C aws_completer aws
7281 "#,
7282 ),
7283 ]).await;
7284
7285 return Ok(http::Response::builder()
7286 .status(200)
7287 .body(AsyncBody::from(response))
7288 .unwrap());
7289 }
7290
7291 Ok(http::Response::builder()
7292 .status(404)
7293 .body(http_client::AsyncBody::default())
7294 .unwrap())
7295 })
7296 }
7297}