1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use regex::Regex;
10
11use fs::Fs;
12use http_client::HttpClient;
13use util::{ResultExt, command::Command, normalize_path};
14
15use crate::{
16 DevContainerConfig, DevContainerContext,
17 command_json::{CommandRunner, DefaultCommandRunner},
18 devcontainer_api::{DevContainerError, DevContainerUp},
19 devcontainer_json::{
20 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
21 deserialize_devcontainer_json,
22 },
23 docker::{
24 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
25 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
26 },
27 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
28 get_oci_token,
29 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
30 safe_id_lower,
31};
32
/// Tracks how far the devcontainer configuration has progressed through
/// variable expansion. The manifest is first deserialized verbatim, then
/// re-parsed after `${...}` substitution (see `parse_nonremote_vars`);
/// most build/run steps require the `VariableParsed` state.
enum ConfigStatus {
    /// Config parsed straight from devcontainer.json; `${...}` variables are
    /// still present in its string fields.
    Deserialized(DevContainer),
    /// Config re-parsed from the raw JSON after non-remote variable
    /// substitution was applied to the text.
    VariableParsed(DevContainer),
}
37
/// The resolved docker-compose inputs for a compose-based dev container:
/// the ordered list of compose files to pass to `docker compose` (user files
/// first, generated override files appended later) together with the parsed,
/// merged configuration reported by `docker compose config`.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Ordered compose file paths; order matters to docker compose merging.
    files: Vec<PathBuf>,
    // Parsed merged configuration for those files.
    config: DockerComposeConfig,
}
43
/// Aggregates everything needed to build and launch a single dev container:
/// external services (fs, http, docker, command runner), the parsed
/// devcontainer.json, paths of the local project and config, and state
/// accumulated while downloading features and preparing the extended build.
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // Raw devcontainer.json text, kept so variable substitution can be
    // re-applied to the original source (see `parse_nonremote_vars`).
    raw_config: String,
    // Current parse/expansion state of the configuration.
    config: ConfigStatus,
    // Host environment used for `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the local project (checkout) on the host.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer.json file.
    config_directory: PathBuf,
    // File name of the devcontainer.json within `config_directory`.
    file_name: String,
    // Inspect result of the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths/tags for the generated features build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
59const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
60impl DevContainerManifest {
61 async fn new(
62 context: &DevContainerContext,
63 environment: HashMap<String, String>,
64 docker_client: Arc<dyn DockerClient>,
65 command_runner: Arc<dyn CommandRunner>,
66 local_config: DevContainerConfig,
67 local_project_path: &Path,
68 ) -> Result<Self, DevContainerError> {
69 let config_path = local_project_path.join(local_config.config_path.clone());
70 log::debug!("parsing devcontainer json found in {:?}", &config_path);
71 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
72 log::error!("Unable to read devcontainer contents: {e}");
73 DevContainerError::DevContainerParseFailed
74 })?;
75
76 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
77
78 let devcontainer_directory = config_path.parent().ok_or_else(|| {
79 log::error!("Dev container file should be in a directory");
80 DevContainerError::NotInValidProject
81 })?;
82 let file_name = config_path
83 .file_name()
84 .and_then(|f| f.to_str())
85 .ok_or_else(|| {
86 log::error!("Dev container file has no file name, or is invalid unicode");
87 DevContainerError::DevContainerParseFailed
88 })?;
89
90 Ok(Self {
91 fs: context.fs.clone(),
92 http_client: context.http_client.clone(),
93 docker_client,
94 command_runner,
95 raw_config: devcontainer_contents,
96 config: ConfigStatus::Deserialized(devcontainer),
97 local_project_directory: local_project_path.to_path_buf(),
98 local_environment: environment,
99 config_directory: devcontainer_directory.to_path_buf(),
100 file_name: file_name.to_string(),
101 root_image: None,
102 features_build_info: None,
103 features: Vec::new(),
104 })
105 }
106
107 fn devcontainer_id(&self) -> String {
108 let mut labels = self.identifying_labels();
109 labels.sort_by_key(|(key, _)| *key);
110
111 let mut hasher = DefaultHasher::new();
112 for (key, value) in &labels {
113 key.hash(&mut hasher);
114 value.hash(&mut hasher);
115 }
116
117 format!("{:016x}", hasher.finish())
118 }
119
120 fn identifying_labels(&self) -> Vec<(&str, String)> {
121 let labels = vec![
122 (
123 "devcontainer.local_folder",
124 (self.local_project_directory.display()).to_string(),
125 ),
126 (
127 "devcontainer.config_file",
128 (self.config_file().display()).to_string(),
129 ),
130 ];
131 labels
132 }
133
134 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
135 let mut replaced_content = content
136 .replace("${devcontainerId}", &self.devcontainer_id())
137 .replace(
138 "${containerWorkspaceFolderBasename}",
139 &self.remote_workspace_base_name().unwrap_or_default(),
140 )
141 .replace(
142 "${localWorkspaceFolderBasename}",
143 &self.local_workspace_base_name()?,
144 )
145 .replace(
146 "${containerWorkspaceFolder}",
147 &self
148 .remote_workspace_folder()
149 .map(|path| path.display().to_string())
150 .unwrap_or_default()
151 .replace('\\', "/"),
152 )
153 .replace(
154 "${localWorkspaceFolder}",
155 &self.local_workspace_folder().replace('\\', "/"),
156 );
157 for (k, v) in &self.local_environment {
158 let find = format!("${{localEnv:{k}}}");
159 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
160 }
161
162 Ok(replaced_content)
163 }
164
165 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
166 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
167 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
168
169 self.config = ConfigStatus::VariableParsed(parsed_config);
170
171 Ok(())
172 }
173
174 fn runtime_remote_env(
175 &self,
176 container_env: &HashMap<String, String>,
177 ) -> Result<HashMap<String, String>, DevContainerError> {
178 let mut merged_remote_env = container_env.clone();
179 // HOME is user-specific, and we will often not run as the image user
180 merged_remote_env.remove("HOME");
181 if let Some(remote_env) = self.dev_container().remote_env.clone() {
182 let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
183 log::error!(
184 "Unexpected error serializing dev container remote_env: {e} - {:?}",
185 remote_env
186 );
187 DevContainerError::DevContainerParseFailed
188 })?;
189 for (k, v) in container_env {
190 raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
191 }
192 let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
193 .map_err(|e| {
194 log::error!(
195 "Unexpected error reserializing dev container remote env: {e} - {:?}",
196 &raw
197 );
198 DevContainerError::DevContainerParseFailed
199 })?;
200 for (k, v) in reserialized {
201 merged_remote_env.insert(k, v);
202 }
203 }
204 Ok(merged_remote_env)
205 }
206
207 fn config_file(&self) -> PathBuf {
208 self.config_directory.join(&self.file_name)
209 }
210
211 fn dev_container(&self) -> &DevContainer {
212 match &self.config {
213 ConfigStatus::Deserialized(dev_container) => dev_container,
214 ConfigStatus::VariableParsed(dev_container) => dev_container,
215 }
216 }
217
218 async fn dockerfile_location(&self) -> Option<PathBuf> {
219 let dev_container = self.dev_container();
220 match dev_container.build_type() {
221 DevContainerBuildType::Image(_) => None,
222 DevContainerBuildType::Dockerfile(build) => {
223 Some(self.config_directory.join(&build.dockerfile))
224 }
225 DevContainerBuildType::DockerCompose => {
226 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
227 return None;
228 };
229 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
230 else {
231 return None;
232 };
233 main_service.build.and_then(|b| {
234 let compose_file = docker_compose_manifest.files.first()?;
235 resolve_compose_dockerfile(
236 compose_file,
237 b.context.as_deref(),
238 b.dockerfile.as_deref()?,
239 )
240 })
241 }
242 DevContainerBuildType::None => None,
243 }
244 }
245
246 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
247 let mut hasher = DefaultHasher::new();
248 let prefix = match &self.dev_container().name {
249 Some(name) => &safe_id_lower(name),
250 None => "zed-dc",
251 };
252 let prefix = prefix.get(..6).unwrap_or(prefix);
253
254 dockerfile_build_path.hash(&mut hasher);
255
256 let hash = hasher.finish();
257 format!("{}-{:x}-features", prefix, hash)
258 }
259
260 /// Gets the base image from the devcontainer with the following precedence:
261 /// - The devcontainer image if an image is specified
262 /// - The image sourced in the Dockerfile if a Dockerfile is specified
263 /// - The image sourced in the docker-compose main service, if one is specified
264 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
265 /// If no such image is available, return an error
266 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
267 match self.dev_container().build_type() {
268 DevContainerBuildType::Image(image) => {
269 return Ok(image);
270 }
271 DevContainerBuildType::Dockerfile(build) => {
272 let dockerfile_contents = self.expanded_dockerfile_content().await?;
273 return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
274 || {
275 log::error!("Unable to find base image in Dockerfile");
276 DevContainerError::DevContainerParseFailed
277 },
278 );
279 }
280 DevContainerBuildType::DockerCompose => {
281 let docker_compose_manifest = self.docker_compose_manifest().await?;
282 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
283
284 if let Some(_) = main_service
285 .build
286 .as_ref()
287 .and_then(|b| b.dockerfile.as_ref())
288 {
289 let dockerfile_contents = self.expanded_dockerfile_content().await?;
290 return image_from_dockerfile(
291 dockerfile_contents,
292 &main_service.build.as_ref().and_then(|b| b.target.clone()),
293 )
294 .ok_or_else(|| {
295 log::error!("Unable to find base image in Dockerfile");
296 DevContainerError::DevContainerParseFailed
297 });
298 }
299 if let Some(image) = &main_service.image {
300 return Ok(image.to_string());
301 }
302
303 log::error!("No valid base image found in docker-compose configuration");
304 return Err(DevContainerError::DevContainerParseFailed);
305 }
306 DevContainerBuildType::None => {
307 log::error!("Not a valid devcontainer config for build");
308 return Err(DevContainerError::NotInValidProject);
309 }
310 }
311 }
312
    /// Prepares everything needed for the features-extended image build:
    /// resolves the base image, downloads each configured OCI feature into a
    /// temp directory, and writes the generated `Dockerfile.extended`.
    /// Requires the config to be in the `VariableParsed` state; populates
    /// `self.root_image`, `self.features`, and `self.features_build_info`.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // --- Phase 1: Resolve the base image and lay out temp directories ---
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps repeated runs from colliding in the
        // shared temp dir (falls back to 0 if the clock is before the epoch).
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // An intentionally empty build context for builds that need no files.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Seed env file consumed by feature install scripts inside the build.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // --- Phase 2: Download each enabled feature from its OCI registry ---

        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature mapped to literal `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            let feature_id = extract_feature_id(feature_ref);
            // Index suffix keeps directories unique if the same feature id
            // appears more than once.
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // The feature tarball is expected in the manifest's first layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also participates in variable substitution.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Materialize the feature's option env file, then the wrapper
            // script the extended Dockerfile invokes during the build.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit is assumed for plain docker builds; compose builds depend on
        // the client's reported capability.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Inject a named stage alias so the extended Dockerfile can build FROM
        // the user's (possibly multi-stage) Dockerfile content.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
570
    /// Assembles the `Dockerfile.extended` text: the user's (alias-injected)
    /// Dockerfile content, a normalization stage that copies the builtin env
    /// file, the target stage that installs each feature layer, and — when no
    /// UID-update layer will be added later — trailing ENV/profile fixups.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping defaults on, except on Windows hosts where it is
        // always skipped.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per downloaded feature, concatenated in
        // install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that look up each user's passwd entry (used below to
        // extract home directories into the builtin env file).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit there are no additional build contexts, so the
        // feature content is staged through an extra image/stage instead.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
662
663 fn build_merged_resources(
664 &self,
665 base_image: DockerInspect,
666 ) -> Result<DockerBuildResources, DevContainerError> {
667 let dev_container = match &self.config {
668 ConfigStatus::Deserialized(_) => {
669 log::error!(
670 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
671 );
672 return Err(DevContainerError::DevContainerParseFailed);
673 }
674 ConfigStatus::VariableParsed(dev_container) => dev_container,
675 };
676 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
677
678 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
679
680 mounts.append(&mut feature_mounts);
681
682 let privileged = dev_container.privileged.unwrap_or(false)
683 || self.features.iter().any(|f| f.privileged());
684
685 let mut entrypoint_script_lines = vec![
686 "echo Container started".to_string(),
687 "trap \"exit 0\" 15".to_string(),
688 ];
689
690 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
691 entrypoint_script_lines.push(entrypoint.clone());
692 }
693 entrypoint_script_lines.append(&mut vec![
694 "exec \"$@\"".to_string(),
695 "while sleep 1 & wait $!; do :; done".to_string(),
696 ]);
697
698 Ok(DockerBuildResources {
699 image: base_image,
700 additional_mounts: mounts,
701 privileged,
702 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
703 })
704 }
705
706 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
707 if let ConfigStatus::Deserialized(_) = &self.config {
708 log::error!(
709 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
710 );
711 return Err(DevContainerError::DevContainerParseFailed);
712 }
713 let dev_container = self.dev_container();
714 match dev_container.build_type() {
715 DevContainerBuildType::Image(base_image) => {
716 let built_docker_image = self.build_docker_image().await?;
717
718 let built_docker_image = self
719 .update_remote_user_uid(built_docker_image, &base_image)
720 .await?;
721
722 let resources = self.build_merged_resources(built_docker_image)?;
723 Ok(DevContainerBuildResources::Docker(resources))
724 }
725 DevContainerBuildType::Dockerfile(_) => {
726 let built_docker_image = self.build_docker_image().await?;
727 let Some(features_build_info) = &self.features_build_info else {
728 log::error!(
729 "Can't attempt to build update UID dockerfile before initial docker build"
730 );
731 return Err(DevContainerError::DevContainerParseFailed);
732 };
733 let built_docker_image = self
734 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
735 .await?;
736
737 let resources = self.build_merged_resources(built_docker_image)?;
738 Ok(DevContainerBuildResources::Docker(resources))
739 }
740 DevContainerBuildType::DockerCompose => {
741 log::debug!("Using docker compose. Building extended compose files");
742 let docker_compose_resources = self.build_and_extend_compose_files().await?;
743
744 return Ok(DevContainerBuildResources::DockerCompose(
745 docker_compose_resources,
746 ));
747 }
748 DevContainerBuildType::None => {
749 return Err(DevContainerError::DevContainerParseFailed);
750 }
751 }
752 }
753
754 async fn run_dev_container(
755 &self,
756 build_resources: DevContainerBuildResources,
757 ) -> Result<DevContainerUp, DevContainerError> {
758 let ConfigStatus::VariableParsed(_) = &self.config else {
759 log::error!(
760 "Variables have not been parsed; cannot proceed with running the dev container"
761 );
762 return Err(DevContainerError::DevContainerParseFailed);
763 };
764 let running_container = match build_resources {
765 DevContainerBuildResources::DockerCompose(resources) => {
766 self.run_docker_compose(resources).await?
767 }
768 DevContainerBuildResources::Docker(resources) => {
769 self.run_docker_image(resources).await?
770 }
771 };
772
773 let remote_user = get_remote_user_from_config(&running_container, self)?;
774 let remote_workspace_folder = self.remote_workspace_folder()?;
775
776 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
777
778 Ok(DevContainerUp {
779 container_id: running_container.id,
780 remote_user,
781 remote_workspace_folder: remote_workspace_folder.display().to_string(),
782 extension_ids: self.extension_ids(),
783 remote_env,
784 })
785 }
786
787 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
788 let dev_container = match &self.config {
789 ConfigStatus::Deserialized(_) => {
790 log::error!(
791 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
792 );
793 return Err(DevContainerError::DevContainerParseFailed);
794 }
795 ConfigStatus::VariableParsed(dev_container) => dev_container,
796 };
797 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
798 return Err(DevContainerError::DevContainerParseFailed);
799 };
800 let docker_compose_full_paths = docker_compose_files
801 .iter()
802 .map(|relative| self.config_directory.join(relative))
803 .collect::<Vec<PathBuf>>();
804
805 let Some(config) = self
806 .docker_client
807 .get_docker_compose_config(&docker_compose_full_paths)
808 .await?
809 else {
810 log::error!("Output could not deserialize into DockerComposeConfig");
811 return Err(DevContainerError::DevContainerParseFailed);
812 };
813 Ok(DockerComposeResources {
814 files: docker_compose_full_paths,
815 config,
816 })
817 }
818
819 async fn build_and_extend_compose_files(
820 &self,
821 ) -> Result<DockerComposeResources, DevContainerError> {
822 let dev_container = match &self.config {
823 ConfigStatus::Deserialized(_) => {
824 log::error!(
825 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
826 );
827 return Err(DevContainerError::DevContainerParseFailed);
828 }
829 ConfigStatus::VariableParsed(dev_container) => dev_container,
830 };
831
832 let Some(features_build_info) = &self.features_build_info else {
833 log::error!(
834 "Cannot build and extend compose files: features build info is not yet constructed"
835 );
836 return Err(DevContainerError::DevContainerParseFailed);
837 };
838 let mut docker_compose_resources = self.docker_compose_manifest().await?;
839 let supports_buildkit = self.docker_client.supports_compose_buildkit();
840
841 let (main_service_name, main_service) =
842 find_primary_service(&docker_compose_resources, self)?;
843 let (built_service_image, built_service_image_tag) = if main_service
844 .build
845 .as_ref()
846 .map(|b| b.dockerfile.as_ref())
847 .is_some()
848 {
849 if !supports_buildkit {
850 self.build_feature_content_image().await?;
851 }
852
853 let dockerfile_path = &features_build_info.dockerfile_path;
854
855 let build_args = if !supports_buildkit {
856 HashMap::from([
857 (
858 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
859 "dev_container_auto_added_stage_label".to_string(),
860 ),
861 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
862 ])
863 } else {
864 HashMap::from([
865 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
866 (
867 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
868 "dev_container_auto_added_stage_label".to_string(),
869 ),
870 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
871 ])
872 };
873
874 let additional_contexts = if !supports_buildkit {
875 None
876 } else {
877 Some(HashMap::from([(
878 "dev_containers_feature_content_source".to_string(),
879 features_build_info
880 .features_content_dir
881 .display()
882 .to_string(),
883 )]))
884 };
885
886 let build_override = DockerComposeConfig {
887 name: None,
888 services: HashMap::from([(
889 main_service_name.clone(),
890 DockerComposeService {
891 image: Some(features_build_info.image_tag.clone()),
892 entrypoint: None,
893 cap_add: None,
894 security_opt: None,
895 labels: None,
896 build: Some(DockerComposeServiceBuild {
897 context: Some(
898 main_service
899 .build
900 .as_ref()
901 .and_then(|b| b.context.clone())
902 .unwrap_or_else(|| {
903 features_build_info.empty_context_dir.display().to_string()
904 }),
905 ),
906 dockerfile: Some(dockerfile_path.display().to_string()),
907 target: Some("dev_containers_target_stage".to_string()),
908 args: Some(build_args),
909 additional_contexts,
910 }),
911 volumes: Vec::new(),
912 ..Default::default()
913 },
914 )]),
915 volumes: HashMap::new(),
916 };
917
918 let temp_base = std::env::temp_dir().join("devcontainer-zed");
919 let config_location = temp_base.join("docker_compose_build.json");
920
921 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
922 log::error!("Error serializing docker compose runtime override: {e}");
923 DevContainerError::DevContainerParseFailed
924 })?;
925
926 self.fs
927 .write(&config_location, config_json.as_bytes())
928 .await
929 .map_err(|e| {
930 log::error!("Error writing the runtime override file: {e}");
931 DevContainerError::FilesystemError
932 })?;
933
934 docker_compose_resources.files.push(config_location);
935
936 self.docker_client
937 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
938 .await?;
939 (
940 self.docker_client
941 .inspect(&features_build_info.image_tag)
942 .await?,
943 &features_build_info.image_tag,
944 )
945 } else if let Some(image) = &main_service.image {
946 if dev_container
947 .features
948 .as_ref()
949 .is_none_or(|features| features.is_empty())
950 {
951 (self.docker_client.inspect(image).await?, image)
952 } else {
953 if !supports_buildkit {
954 self.build_feature_content_image().await?;
955 }
956
957 let dockerfile_path = &features_build_info.dockerfile_path;
958
959 let build_args = if !supports_buildkit {
960 HashMap::from([
961 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
962 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
963 ])
964 } else {
965 HashMap::from([
966 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
967 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
968 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
969 ])
970 };
971
972 let additional_contexts = if !supports_buildkit {
973 None
974 } else {
975 Some(HashMap::from([(
976 "dev_containers_feature_content_source".to_string(),
977 features_build_info
978 .features_content_dir
979 .display()
980 .to_string(),
981 )]))
982 };
983
984 let build_override = DockerComposeConfig {
985 name: None,
986 services: HashMap::from([(
987 main_service_name.clone(),
988 DockerComposeService {
989 image: Some(features_build_info.image_tag.clone()),
990 entrypoint: None,
991 cap_add: None,
992 security_opt: None,
993 labels: None,
994 build: Some(DockerComposeServiceBuild {
995 context: Some(
996 features_build_info.empty_context_dir.display().to_string(),
997 ),
998 dockerfile: Some(dockerfile_path.display().to_string()),
999 target: Some("dev_containers_target_stage".to_string()),
1000 args: Some(build_args),
1001 additional_contexts,
1002 }),
1003 volumes: Vec::new(),
1004 ..Default::default()
1005 },
1006 )]),
1007 volumes: HashMap::new(),
1008 };
1009
1010 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1011 let config_location = temp_base.join("docker_compose_build.json");
1012
1013 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1014 log::error!("Error serializing docker compose runtime override: {e}");
1015 DevContainerError::DevContainerParseFailed
1016 })?;
1017
1018 self.fs
1019 .write(&config_location, config_json.as_bytes())
1020 .await
1021 .map_err(|e| {
1022 log::error!("Error writing the runtime override file: {e}");
1023 DevContainerError::FilesystemError
1024 })?;
1025
1026 docker_compose_resources.files.push(config_location);
1027
1028 self.docker_client
1029 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1030 .await?;
1031
1032 (
1033 self.docker_client
1034 .inspect(&features_build_info.image_tag)
1035 .await?,
1036 &features_build_info.image_tag,
1037 )
1038 }
1039 } else {
1040 log::error!("Docker compose must have either image or dockerfile defined");
1041 return Err(DevContainerError::DevContainerParseFailed);
1042 };
1043
1044 let built_service_image = self
1045 .update_remote_user_uid(built_service_image, built_service_image_tag)
1046 .await?;
1047
1048 let resources = self.build_merged_resources(built_service_image)?;
1049
1050 let network_mode = main_service.network_mode.as_ref();
1051 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1052 let runtime_override_file = self
1053 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1054 .await?;
1055
1056 docker_compose_resources.files.push(runtime_override_file);
1057
1058 Ok(docker_compose_resources)
1059 }
1060
1061 async fn write_runtime_override_file(
1062 &self,
1063 main_service_name: &str,
1064 network_mode_service: Option<&str>,
1065 resources: DockerBuildResources,
1066 ) -> Result<PathBuf, DevContainerError> {
1067 let config =
1068 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1069 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1070 let config_location = temp_base.join("docker_compose_runtime.json");
1071
1072 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1073 log::error!("Error serializing docker compose runtime override: {e}");
1074 DevContainerError::DevContainerParseFailed
1075 })?;
1076
1077 self.fs
1078 .write(&config_location, config_json.as_bytes())
1079 .await
1080 .map_err(|e| {
1081 log::error!("Error writing the runtime override file: {e}");
1082 DevContainerError::FilesystemError
1083 })?;
1084
1085 Ok(config_location)
1086 }
1087
    /// Builds the docker-compose override config applied at `up` time.
    ///
    /// The override carries: runtime labels (identifying labels plus the
    /// image's `devcontainer.metadata`), the wrapped entrypoint script,
    /// ptrace/seccomp settings, additional mounts (volume-type mounts also
    /// become top-level named compose volumes), and `forwardPorts` mapped
    /// onto the correct service declarations.
    fn build_runtime_override(
        &self,
        main_service_name: &str,
        network_mode_service: Option<&str>,
        resources: DockerBuildResources,
    ) -> Result<DockerComposeConfig, DevContainerError> {
        let mut runtime_labels = HashMap::new();

        // Propagate the image's devcontainer metadata label onto the running
        // container so later attaches can recover the merged configuration.
        if let Some(metadata) = &resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Error serializing docker image metadata: {e}");
                DevContainerError::ContainerNotValid(resources.image.id.clone())
            })?;

            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
        }

        for (k, v) in self.identifying_labels() {
            runtime_labels.insert(k.to_string(), v.to_string());
        }

        // Every "volume"-type mount with a named source needs a matching
        // top-level `volumes:` entry in the compose file, keyed by source name.
        let config_volumes: HashMap<String, DockerComposeVolume> = resources
            .additional_mounts
            .iter()
            .filter_map(|mount| {
                if let Some(mount_type) = &mount.mount_type
                    && mount_type.to_lowercase() == "volume"
                    && let Some(source) = &mount.source
                {
                    Some((
                        source.clone(),
                        DockerComposeVolume {
                            name: source.clone(),
                        },
                    ))
                } else {
                    None
                }
            })
            .collect();

        // All additional mounts (bind and volume alike) are attached to the
        // main service as service-level volume entries.
        let volumes: Vec<MountDefinition> = resources
            .additional_mounts
            .iter()
            .map(|v| MountDefinition {
                source: v.source.clone(),
                target: v.target.clone(),
                mount_type: v.mount_type.clone(),
            })
            .collect();

        // The entrypoint wraps the generated script in `/bin/sh -c <script> -`
        // so the container stays alive under our control.
        let mut main_service = DockerComposeService {
            entrypoint: Some(vec![
                "/bin/sh".to_string(),
                "-c".to_string(),
                resources.entrypoint_script,
                "-".to_string(),
            ]),
            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
            labels: Some(runtime_labels),
            volumes,
            privileged: Some(resources.privileged),
            ..Default::default()
        };
        // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            // Ports that belong to the main service: bare numbers, bare
            // "port" strings, or "service:port" strings naming the main
            // service. Anything else is handled in the second pass below.
            let main_service_ports: Vec<String> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(port) => Some(port.to_string()),
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() <= 1 {
                            Some(port.to_string())
                        } else if parts.len() == 2 {
                            if parts[0] == main_service_name {
                                Some(parts[1].to_string())
                            } else {
                                None
                            }
                        } else {
                            None
                        }
                    }
                })
                .collect();
            for port in main_service_ports {
                // If the main service uses a different service's network bridge, append to that service's ports instead
                if let Some(network_service_name) = network_mode_service {
                    if let Some(service) = service_declarations.get_mut(network_service_name) {
                        service.ports.push(DockerComposeServicePort {
                            target: port.clone(),
                            published: port.clone(),
                            ..Default::default()
                        });
                    } else {
                        service_declarations.insert(
                            network_service_name.to_string(),
                            DockerComposeService {
                                ports: vec![DockerComposeServicePort {
                                    target: port.clone(),
                                    published: port.clone(),
                                    ..Default::default()
                                }],
                                ..Default::default()
                            },
                        );
                    }
                } else {
                    main_service.ports.push(DockerComposeServicePort {
                        target: port.clone(),
                        published: port.clone(),
                        ..Default::default()
                    });
                }
            }
            // Second pass: "service:port" entries naming some other compose
            // service get a port declaration on that service's override entry.
            let other_service_ports: Vec<(&str, &str)> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(_) => None,
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() != 2 {
                            None
                        } else {
                            if parts[0] == main_service_name {
                                None
                            } else {
                                Some((parts[0], parts[1]))
                            }
                        }
                    }
                })
                .collect();
            for (service_name, port) in other_service_ports {
                if let Some(service) = service_declarations.get_mut(service_name) {
                    service.ports.push(DockerComposeServicePort {
                        target: port.to_string(),
                        published: port.to_string(),
                        ..Default::default()
                    });
                } else {
                    service_declarations.insert(
                        service_name.to_string(),
                        DockerComposeService {
                            ports: vec![DockerComposeServicePort {
                                target: port.to_string(),
                                published: port.to_string(),
                                ..Default::default()
                            }],
                            ..Default::default()
                        },
                    );
                }
            }
        }

        // Insert the main service last so it is never clobbered by the
        // placeholder declarations created for other services above.
        service_declarations.insert(main_service_name.to_string(), main_service);
        let new_docker_compose_config = DockerComposeConfig {
            name: None,
            services: service_declarations,
            volumes: config_volumes,
        };

        Ok(new_docker_compose_config)
    }
1256
1257 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1258 let dev_container = match &self.config {
1259 ConfigStatus::Deserialized(_) => {
1260 log::error!(
1261 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1262 );
1263 return Err(DevContainerError::DevContainerParseFailed);
1264 }
1265 ConfigStatus::VariableParsed(dev_container) => dev_container,
1266 };
1267
1268 match dev_container.build_type() {
1269 DevContainerBuildType::Image(image_tag) => {
1270 let base_image = self.docker_client.inspect(&image_tag).await?;
1271 if dev_container
1272 .features
1273 .as_ref()
1274 .is_none_or(|features| features.is_empty())
1275 {
1276 log::debug!("No features to add. Using base image");
1277 return Ok(base_image);
1278 }
1279 }
1280 DevContainerBuildType::Dockerfile(_) => {}
1281 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1282 return Err(DevContainerError::DevContainerParseFailed);
1283 }
1284 };
1285
1286 let mut command = self.create_docker_build()?;
1287
1288 let output = self
1289 .command_runner
1290 .run_command(&mut command)
1291 .await
1292 .map_err(|e| {
1293 log::error!("Error building docker image: {e}");
1294 DevContainerError::CommandFailed(command.get_program().display().to_string())
1295 })?;
1296
1297 if !output.status.success() {
1298 let stderr = String::from_utf8_lossy(&output.stderr);
1299 log::error!("docker buildx build failed: {stderr}");
1300 return Err(DevContainerError::CommandFailed(
1301 command.get_program().display().to_string(),
1302 ));
1303 }
1304
1305 // After a successful build, inspect the newly tagged image to get its metadata
1306 let Some(features_build_info) = &self.features_build_info else {
1307 log::error!("Features build info expected, but not created");
1308 return Err(DevContainerError::DevContainerParseFailed);
1309 };
1310 let image = self
1311 .docker_client
1312 .inspect(&features_build_info.image_tag)
1313 .await?;
1314
1315 Ok(image)
1316 }
1317
    /// Windows stub: UID/GID remapping is a unix concern, so the image is
    /// returned unchanged (see the non-windows variant below).
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
    /// Rebuilds the image with the remote user's UID/GID remapped to the
    /// host's, so files created in bind mounts keep sane host ownership.
    ///
    /// Skipped (returning the image unchanged) when there is no features
    /// build info, when the config sets `updateRemoteUserUID: false`, or when
    /// the remote user is root / already a numeric id.
    #[cfg(not(target_os = "windows"))]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        let dev_container = self.dev_container();

        let Some(features_build_info) = &self.features_build_info else {
            return Ok(image);
        };

        // updateRemoteUserUID defaults to true per the devcontainers spec
        if dev_container.update_remote_user_uid == Some(false) {
            return Ok(image);
        }

        let remote_user = get_remote_user_from_config(&image, self)?;
        if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
            return Ok(image);
        }

        // User the final image should run as; restored at the end of the
        // generated Dockerfile via `USER $IMAGE_USER`.
        let image_user = image
            .config
            .image_user
            .as_deref()
            .unwrap_or("root")
            .to_string();

        // Host-side `id -u` / `id -g` give the UID/GID to remap onto the
        // container's remote user.
        let host_uid = Command::new("id")
            .arg("-u")
            .output()
            .await
            .map_err(|e| {
                log::error!("Failed to get host UID: {e}");
                DevContainerError::CommandFailed("id -u".to_string())
            })
            .and_then(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .trim()
                    .parse::<u32>()
                    .map_err(|e| {
                        log::error!("Failed to parse host UID: {e}");
                        DevContainerError::CommandFailed("id -u".to_string())
                    })
            })?;

        let host_gid = Command::new("id")
            .arg("-g")
            .output()
            .await
            .map_err(|e| {
                log::error!("Failed to get host GID: {e}");
                DevContainerError::CommandFailed("id -g".to_string())
            })
            .and_then(|output| {
                String::from_utf8_lossy(&output.stdout)
                    .trim()
                    .parse::<u32>()
                    .map_err(|e| {
                        log::error!("Failed to parse host GID: {e}");
                        DevContainerError::CommandFailed("id -g".to_string())
                    })
            })?;

        let dockerfile_content = self.generate_update_uid_dockerfile();

        let dockerfile_path = features_build_info
            .features_content_dir
            .join("updateUID.Dockerfile");
        self.fs
            .write(&dockerfile_path, dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write updateUID Dockerfile: {e}");
                DevContainerError::FilesystemError
            })?;

        // Re-tagging with the same features image tag means downstream code
        // keeps referring to one canonical tag.
        let updated_image_tag = features_build_info.image_tag.clone();

        let mut command = Command::new(self.docker_client.docker_cli());
        command.args(["build"]);
        command.args(["-f", &dockerfile_path.display().to_string()]);
        command.args(["-t", &updated_image_tag]);
        command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
        command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
        command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
        command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
        command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
        // Empty build context: everything needed comes in via build args and
        // the -f Dockerfile.
        command.arg(features_build_info.empty_context_dir.display().to_string());

        let output = self
            .command_runner
            .run_command(&mut command)
            .await
            .map_err(|e| {
                log::error!("Error building UID update image: {e}");
                DevContainerError::CommandFailed(command.get_program().display().to_string())
            })?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            log::error!("UID update build failed: {stderr}");
            return Err(DevContainerError::CommandFailed(
                command.get_program().display().to_string(),
            ));
        }

        self.docker_client.inspect(&updated_image_tag).await
    }
1436
1437 #[cfg(not(target_os = "windows"))]
1438 fn generate_update_uid_dockerfile(&self) -> String {
1439 let mut dockerfile = r#"ARG BASE_IMAGE
1440FROM $BASE_IMAGE
1441
1442USER root
1443
1444ARG REMOTE_USER
1445ARG NEW_UID
1446ARG NEW_GID
1447SHELL ["/bin/sh", "-c"]
1448RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1449 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1450 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1451 if [ -z "$OLD_UID" ]; then \
1452 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1453 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1454 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1455 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1456 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1457 else \
1458 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1459 FREE_GID=65532; \
1460 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1461 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1462 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1463 fi; \
1464 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1465 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1466 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1467 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1468 fi; \
1469 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1470 fi;
1471
1472ARG IMAGE_USER
1473USER $IMAGE_USER
1474
1475# Ensure that /etc/profile does not clobber the existing path
1476RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1477"#.to_string();
1478 for feature in &self.features {
1479 let container_env_layer = feature.generate_dockerfile_env();
1480 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1481 }
1482
1483 if let Some(env) = &self.dev_container().container_env {
1484 for (key, value) in env {
1485 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1486 }
1487 }
1488 dockerfile
1489 }
1490
1491 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1492 let Some(features_build_info) = &self.features_build_info else {
1493 log::error!("Features build info not available for building feature content image");
1494 return Err(DevContainerError::DevContainerParseFailed);
1495 };
1496 let features_content_dir = &features_build_info.features_content_dir;
1497
1498 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1499 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1500
1501 self.fs
1502 .write(&dockerfile_path, dockerfile_content.as_bytes())
1503 .await
1504 .map_err(|e| {
1505 log::error!("Failed to write feature content Dockerfile: {e}");
1506 DevContainerError::FilesystemError
1507 })?;
1508
1509 let mut command = Command::new(self.docker_client.docker_cli());
1510 command.args([
1511 "build",
1512 "-t",
1513 "dev_container_feature_content_temp",
1514 "-f",
1515 &dockerfile_path.display().to_string(),
1516 &features_content_dir.display().to_string(),
1517 ]);
1518
1519 let output = self
1520 .command_runner
1521 .run_command(&mut command)
1522 .await
1523 .map_err(|e| {
1524 log::error!("Error building feature content image: {e}");
1525 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1526 })?;
1527
1528 if !output.status.success() {
1529 let stderr = String::from_utf8_lossy(&output.stderr);
1530 log::error!("Feature content image build failed: {stderr}");
1531 return Err(DevContainerError::CommandFailed(
1532 self.docker_client.docker_cli(),
1533 ));
1534 }
1535
1536 Ok(())
1537 }
1538
    /// Assembles the `docker buildx build` command that produces the
    /// features-extended image, mirroring the devcontainers reference CLI's
    /// build arguments. Requires a variable-expanded config and populated
    /// features build info.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // The user the base image runs as, falling back to root when the
        // root image (or its user) is unknown.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // User-provided build args from the devcontainer config are passed
        // through verbatim.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
            command.arg(self.config_directory.display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1627
1628 async fn run_docker_compose(
1629 &self,
1630 resources: DockerComposeResources,
1631 ) -> Result<DockerInspect, DevContainerError> {
1632 let mut command = Command::new(self.docker_client.docker_cli());
1633 command.args(&["compose", "--project-name", &self.project_name()]);
1634 for docker_compose_file in resources.files {
1635 command.args(&["-f", &docker_compose_file.display().to_string()]);
1636 }
1637 command.args(&["up", "-d"]);
1638
1639 let output = self
1640 .command_runner
1641 .run_command(&mut command)
1642 .await
1643 .map_err(|e| {
1644 log::error!("Error running docker compose up: {e}");
1645 DevContainerError::CommandFailed(command.get_program().display().to_string())
1646 })?;
1647
1648 if !output.status.success() {
1649 let stderr = String::from_utf8_lossy(&output.stderr);
1650 log::error!("Non-success status from docker compose up: {}", stderr);
1651 return Err(DevContainerError::CommandFailed(
1652 command.get_program().display().to_string(),
1653 ));
1654 }
1655
1656 if let Some(docker_ps) = self.check_for_existing_container().await? {
1657 log::debug!("Found newly created dev container");
1658 return self.docker_client.inspect(&docker_ps.id).await;
1659 }
1660
1661 log::error!("Could not find existing container after docker compose up");
1662
1663 Err(DevContainerError::DevContainerParseFailed)
1664 }
1665
1666 async fn run_docker_image(
1667 &self,
1668 build_resources: DockerBuildResources,
1669 ) -> Result<DockerInspect, DevContainerError> {
1670 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1671
1672 let output = self
1673 .command_runner
1674 .run_command(&mut docker_run_command)
1675 .await
1676 .map_err(|e| {
1677 log::error!("Error running docker run: {e}");
1678 DevContainerError::CommandFailed(
1679 docker_run_command.get_program().display().to_string(),
1680 )
1681 })?;
1682
1683 if !output.status.success() {
1684 let std_err = String::from_utf8_lossy(&output.stderr);
1685 log::error!("Non-success status from docker run. StdErr: {std_err}");
1686 return Err(DevContainerError::CommandFailed(
1687 docker_run_command.get_program().display().to_string(),
1688 ));
1689 }
1690
1691 log::debug!("Checking for container that was started");
1692 let Some(docker_ps) = self.check_for_existing_container().await? else {
1693 log::error!("Could not locate container just created");
1694 return Err(DevContainerError::DevContainerParseFailed);
1695 };
1696 self.docker_client.inspect(&docker_ps.id).await
1697 }
1698
1699 fn local_workspace_folder(&self) -> String {
1700 self.local_project_directory.display().to_string()
1701 }
1702 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1703 self.local_project_directory
1704 .file_name()
1705 .map(|f| f.display().to_string())
1706 .ok_or(DevContainerError::DevContainerParseFailed)
1707 }
1708
1709 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1710 self.dev_container()
1711 .workspace_folder
1712 .as_ref()
1713 .map(|folder| PathBuf::from(folder))
1714 .or(Some(
1715 // We explicitly use "/" here, instead of PathBuf::join
1716 // because we want remote targets to use unix-style filepaths,
1717 // even on a Windows host
1718 PathBuf::from(format!(
1719 "{}/{}",
1720 DEFAULT_REMOTE_PROJECT_DIR,
1721 self.local_workspace_base_name()?
1722 )),
1723 ))
1724 .ok_or(DevContainerError::DevContainerParseFailed)
1725 }
1726 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1727 self.remote_workspace_folder().and_then(|f| {
1728 f.file_name()
1729 .map(|file_name| file_name.display().to_string())
1730 .ok_or(DevContainerError::DevContainerParseFailed)
1731 })
1732 }
1733
1734 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1735 if let Some(mount) = &self.dev_container().workspace_mount {
1736 return Ok(mount.clone());
1737 }
1738 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1739 return Err(DevContainerError::DevContainerParseFailed);
1740 };
1741
1742 Ok(MountDefinition {
1743 source: Some(self.local_workspace_folder()),
1744 // We explicitly use "/" here, instead of PathBuf::join
1745 // because we want the remote target to use unix-style filepaths,
1746 // even on a Windows host
1747 target: format!(
1748 "{}/{}",
1749 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1750 project_directory_name.display()
1751 ),
1752 mount_type: None,
1753 })
1754 }
1755
    /// Assembles the `docker run` command for non-compose configs: workspace
    /// and additional mounts, identifying/metadata labels, forwarded ports,
    /// user `runArgs`, podman-specific defaults, and the wrapped entrypoint.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // User-supplied `runArgs` from the config are passed through first.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        for arg in run_args {
            command.arg(arg);
        }

        // Adds `arg` only when the user has not already supplied an argument
        // starting with `arg_name` via runArgs, so user values win.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        // Podman defaults: disable SELinux labeling and keep the caller's id
        // inside the user namespace, unless overridden by runArgs.
        if &docker_cli == "podman" {
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Identifying labels let us find this container again later.
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata onto the container.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Numeric forwardPorts publish host:container on the same number;
        // string-form entries are compose-service scoped and not handled here.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Entrypoint: `/bin/sh -c <entrypoint_script> -`, with the image id
        // placed between the entrypoint and its arguments per docker run's
        // [OPTIONS] IMAGE [COMMAND [ARG...]] shape.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1850
1851 fn extension_ids(&self) -> Vec<String> {
1852 self.dev_container()
1853 .customizations
1854 .as_ref()
1855 .map(|c| c.zed.extensions.clone())
1856 .unwrap_or_default()
1857 }
1858
1859 async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1860 self.dev_container().validate_devcontainer_contents()?;
1861
1862 self.run_initialize_commands().await?;
1863
1864 self.download_feature_and_dockerfile_resources().await?;
1865
1866 let build_resources = self.build_resources().await?;
1867
1868 let devcontainer_up = self.run_dev_container(build_resources).await?;
1869
1870 self.run_remote_scripts(&devcontainer_up, true).await?;
1871
1872 Ok(devcontainer_up)
1873 }
1874
    /// Runs the config's lifecycle scripts inside the container via
    /// `docker exec`.
    ///
    /// `new_container` controls which phases run: freshly created containers
    /// get `onCreateCommand`, `updateContentCommand`, `postCreateCommand` and
    /// `postStartCommand`; `postAttachCommand` runs in every case.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // All scripts execute from the remote workspace folder as the
        // resolved remote user, with the resolved remote environment.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand runs on both fresh and re-attached containers.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1962
1963 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1964 let ConfigStatus::VariableParsed(config) = &self.config else {
1965 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1966 return Err(DevContainerError::DevContainerParseFailed);
1967 };
1968
1969 if let Some(initialize_command) = &config.initialize_command {
1970 log::debug!("Running initialize command");
1971 initialize_command
1972 .run(&self.command_runner, &self.local_project_directory)
1973 .await
1974 } else {
1975 log::warn!("No initialize command found");
1976 Ok(())
1977 }
1978 }
1979
1980 async fn check_for_existing_devcontainer(
1981 &self,
1982 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1983 if let Some(docker_ps) = self.check_for_existing_container().await? {
1984 log::debug!("Dev container already found. Proceeding with it");
1985
1986 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1987
1988 if !docker_inspect.is_running() {
1989 log::debug!("Container not running. Will attempt to start, and then proceed");
1990 self.docker_client.start_container(&docker_ps.id).await?;
1991 }
1992
1993 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1994
1995 let remote_folder = self.remote_workspace_folder()?;
1996
1997 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1998
1999 let dev_container_up = DevContainerUp {
2000 container_id: docker_ps.id,
2001 remote_user: remote_user,
2002 remote_workspace_folder: remote_folder.display().to_string(),
2003 extension_ids: self.extension_ids(),
2004 remote_env,
2005 };
2006
2007 self.run_remote_scripts(&dev_container_up, false).await?;
2008
2009 Ok(Some(dev_container_up))
2010 } else {
2011 log::debug!("Existing container not found.");
2012
2013 Ok(None)
2014 }
2015 }
2016
2017 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2018 self.docker_client
2019 .find_process_by_filters(
2020 self.identifying_labels()
2021 .iter()
2022 .map(|(k, v)| format!("label={k}={v}"))
2023 .collect(),
2024 )
2025 .await
2026 }
2027
2028 fn project_name(&self) -> String {
2029 if let Some(name) = &self.dev_container().name {
2030 safe_id_lower(name)
2031 } else {
2032 let alternate_name = &self
2033 .local_workspace_base_name()
2034 .unwrap_or(self.local_workspace_folder());
2035 safe_id_lower(alternate_name)
2036 }
2037 }
2038
    /// Loads the config's Dockerfile and performs best-effort `${NAME}` expansion.
    ///
    /// References are replaced using, in precedence order, the devcontainer's
    /// `build.args` and then defaults collected from `ARG NAME=value` lines seen
    /// earlier in the file. Returns the rewritten Dockerfile text, or an error
    /// for image-type configs (no Dockerfile) or when the file cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Build args declared in devcontainer.json's `build.args`.
        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // Defaults gathered from `ARG key=value` lines encountered so far.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches a `key=` token at the start of (or after whitespace in) an
        // ARG directive's argument list; group 1 captures the key.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // A value spans from the end of its `key=` match up to the
                    // start of the next `key=` match (or the end of the line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2097}
2098
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// NOTE(review): `None` presumably means the base comes from a Dockerfile
    /// stage rather than a named image — confirm at the construction site.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2117
2118pub(crate) async fn read_devcontainer_configuration(
2119 config: DevContainerConfig,
2120 context: &DevContainerContext,
2121 environment: HashMap<String, String>,
2122) -> Result<DevContainer, DevContainerError> {
2123 let docker = if context.use_podman {
2124 Docker::new("podman").await
2125 } else {
2126 Docker::new("docker").await
2127 };
2128 let mut dev_container = DevContainerManifest::new(
2129 context,
2130 environment,
2131 Arc::new(docker),
2132 Arc::new(DefaultCommandRunner::new()),
2133 config,
2134 &context.project_directory.as_ref(),
2135 )
2136 .await?;
2137 dev_container.parse_nonremote_vars()?;
2138 Ok(dev_container.dev_container().clone())
2139}
2140
2141pub(crate) async fn spawn_dev_container(
2142 context: &DevContainerContext,
2143 environment: HashMap<String, String>,
2144 config: DevContainerConfig,
2145 local_project_path: &Path,
2146) -> Result<DevContainerUp, DevContainerError> {
2147 let docker = if context.use_podman {
2148 Docker::new("podman").await
2149 } else {
2150 Docker::new("docker").await
2151 };
2152 let mut devcontainer_manifest = DevContainerManifest::new(
2153 context,
2154 environment,
2155 Arc::new(docker),
2156 Arc::new(DefaultCommandRunner::new()),
2157 config,
2158 local_project_path,
2159 )
2160 .await?;
2161
2162 devcontainer_manifest.parse_nonremote_vars()?;
2163
2164 log::debug!("Checking for existing container");
2165 if let Some(devcontainer) = devcontainer_manifest
2166 .check_for_existing_devcontainer()
2167 .await?
2168 {
2169 Ok(devcontainer)
2170 } else {
2171 log::debug!("Existing container not found. Building");
2172
2173 devcontainer_manifest.build_and_run().await
2174 }
2175}
2176
/// Inputs for launching a single-container (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be started from.
    image: DockerInspect,
    /// Extra mounts beyond the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should run privileged.
    privileged: bool,
    /// Keep-alive shell script used as the container's entrypoint command.
    entrypoint_script: String,
}
2184
/// Build inputs, split by how the dev container is orchestrated: a Docker
/// Compose project, or a plain single-container `docker` setup.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2190
2191fn find_primary_service(
2192 docker_compose: &DockerComposeResources,
2193 devcontainer: &DevContainerManifest,
2194) -> Result<(String, DockerComposeService), DevContainerError> {
2195 let Some(service_name) = &devcontainer.dev_container().service else {
2196 return Err(DevContainerError::DevContainerParseFailed);
2197 };
2198
2199 match docker_compose.config.services.get(service_name) {
2200 Some(service) => Ok((service_name.clone(), service.clone())),
2201 None => Err(DevContainerError::DevContainerParseFailed),
2202 }
2203}
2204
2205/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2206/// `dockerfile` is relative to the build `context`, and `context` is relative to
2207/// the compose file's directory.
2208fn resolve_compose_dockerfile(
2209 compose_file: &Path,
2210 context: Option<&str>,
2211 dockerfile: &str,
2212) -> Option<PathBuf> {
2213 let dockerfile = PathBuf::from(dockerfile);
2214 if dockerfile.is_absolute() {
2215 return Some(dockerfile);
2216 }
2217 let compose_dir = compose_file.parent()?;
2218 let context_dir = match context {
2219 Some(ctx) => {
2220 let ctx = PathBuf::from(ctx);
2221 if ctx.is_absolute() {
2222 ctx
2223 } else {
2224 normalize_path(&compose_dir.join(ctx))
2225 }
2226 }
2227 None => compose_dir.to_path_buf(),
2228 };
2229 Some(context_dir.join(dockerfile))
2230}
2231
2232/// Destination folder inside the container where feature content is staged during build.
2233/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
2234const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2235
2236/// Escapes regex special characters in a string.
2237fn escape_regex_chars(input: &str) -> String {
2238 let mut result = String::with_capacity(input.len() * 2);
2239 for c in input.chars() {
2240 if ".*+?^${}()|[]\\".contains(c) {
2241 result.push('\\');
2242 }
2243 result.push(c);
2244 }
2245 result
2246}
2247
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // First find where the version suffix (if any) starts: a digest (`@…`)
    // wins; otherwise a tag (`:…`) is dropped only when it appears after the
    // final path separator.
    let end = match feature_ref.rfind('@') {
        Some(at_idx) => at_idx,
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => colon,
            _ => feature_ref.len(),
        },
    };
    let without_version = &feature_ref[..end];
    // The ID is the final path segment of what remains.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2271
2272/// Generates a shell command that looks up a user's passwd entry.
2273///
2274/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2275/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2276fn get_ent_passwd_shell_command(user: &str) -> String {
2277 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2278 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2279 format!(
2280 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2281 shell = escaped_for_shell,
2282 re = escaped_for_regex,
2283 )
2284}
2285
2286/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2287///
2288/// Features listed in the override come first (in the specified order), followed
2289/// by any remaining features sorted lexicographically by their full reference ID.
2290fn resolve_feature_order<'a>(
2291 features: &'a HashMap<String, FeatureOptions>,
2292 override_order: &Option<Vec<String>>,
2293) -> Vec<(&'a String, &'a FeatureOptions)> {
2294 if let Some(order) = override_order {
2295 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2296 for ordered_id in order {
2297 if let Some((key, options)) = features.get_key_value(ordered_id) {
2298 ordered.push((key, options));
2299 }
2300 }
2301 let mut remaining: Vec<_> = features
2302 .iter()
2303 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2304 .collect();
2305 remaining.sort_by_key(|(id, _)| id.as_str());
2306 ordered.extend(remaining);
2307 ordered
2308 } else {
2309 let mut entries: Vec<_> = features.iter().collect();
2310 entries.sort_by_key(|(id, _)| id.as_str());
2311 entries
2312 }
2313}
2314
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`. The wrapper announces the feature,
/// sources the built-in and per-feature env files, and runs `install.sh`,
/// reporting a clear error on failure via the EXIT trap.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Shell-quote the interpolated values so they cannot break the script.
    // NOTE(review): these quoted values are spliced inside single-quoted
    // `echo '…'` strings below; for ids that shlex must quote, the nesting
    // may render oddly — confirm against the reference CLI's output.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line for the banner's "Options:" section.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!("    {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // `{{`/`}}` are format! escapes for literal braces in the shell function.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : {escaped_name}'
echo 'Id            : {escaped_id}'
echo 'Options       :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2373
2374fn dockerfile_inject_alias(
2375 dockerfile_content: &str,
2376 alias: &str,
2377 build_target: Option<String>,
2378) -> String {
2379 match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2380 Some(target) => format!(
2381 r#"{dockerfile_content}
2382FROM {target} AS {alias}"#
2383 ),
2384 None => dockerfile_content.to_string(),
2385 }
2386}
2387
/// Returns the image referenced by the relevant `FROM` line of a Dockerfile.
///
/// With `target == None` the last `FROM` line wins (the final build stage);
/// with a target, the matching `FROM … AS <target>` line is located instead
/// (the stage-name comparison is case-insensitive, as in Docker). Returns
/// `None` when no matching `FROM` line exists.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match &target {
            Some(target) => {
                // Split on any run of whitespace so `FROM  img \t AS  name`
                // (multiple spaces/tabs) is handled the same as single spaces.
                let parts = from_line.split_whitespace().collect::<Vec<&str>>();
                if parts.len() >= 3
                    && parts.get(parts.len() - 2).unwrap_or(&"").to_lowercase() == "as"
                {
                    parts.last().unwrap_or(&"").to_lowercase() == target.to_lowercase()
                } else {
                    false
                }
            }
            None => true,
        })
        .and_then(|from_line| {
            // The image is the first token after `FROM`.
            from_line
                .split_whitespace()
                .collect::<Vec<&str>>()
                .get(1)
                .map(|s| s.to_string())
        })
}
2413
2414fn get_remote_user_from_config(
2415 docker_config: &DockerInspect,
2416 devcontainer: &DevContainerManifest,
2417) -> Result<String, DevContainerError> {
2418 if let DevContainer {
2419 remote_user: Some(user),
2420 ..
2421 } = &devcontainer.dev_container()
2422 {
2423 return Ok(user.clone());
2424 }
2425 if let Some(metadata) = &docker_config.config.labels.metadata {
2426 for metadatum in metadata {
2427 if let Some(remote_user) = metadatum.get("remoteUser") {
2428 if let Some(remote_user_str) = remote_user.as_str() {
2429 return Ok(remote_user_str.to_string());
2430 }
2431 }
2432 }
2433 }
2434 if let Some(image_user) = &docker_config.config.image_user {
2435 if !image_user.is_empty() {
2436 return Ok(image_user.to_string());
2437 }
2438 }
2439 Ok("root".to_string())
2440}
2441
2442// This should come from spec - see the docs
2443fn get_container_user_from_config(
2444 docker_config: &DockerInspect,
2445 devcontainer: &DevContainerManifest,
2446) -> Result<String, DevContainerError> {
2447 if let Some(user) = &devcontainer.dev_container().container_user {
2448 return Ok(user.to_string());
2449 }
2450 if let Some(metadata) = &docker_config.config.labels.metadata {
2451 for metadatum in metadata {
2452 if let Some(container_user) = metadatum.get("containerUser") {
2453 if let Some(container_user_str) = container_user.as_str() {
2454 return Ok(container_user_str.to_string());
2455 }
2456 }
2457 }
2458 }
2459 if let Some(image_user) = &docker_config.config.image_user {
2460 return Ok(image_user.to_string());
2461 }
2462
2463 Ok("root".to_string())
2464}
2465
2466#[cfg(test)]
2467mod test {
2468 use std::{
2469 collections::HashMap,
2470 ffi::OsStr,
2471 path::{Path, PathBuf},
2472 process::{ExitStatus, Output},
2473 sync::{Arc, Mutex},
2474 };
2475
2476 use async_trait::async_trait;
2477 use fs::{FakeFs, Fs};
2478 use gpui::{AppContext, TestAppContext};
2479 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2480 use project::{
2481 ProjectEnvironment,
2482 worktree_store::{WorktreeIdCounter, WorktreeStore},
2483 };
2484 use serde_json_lenient::Value;
2485 use util::{command::Command, paths::SanitizedPath};
2486
2487 #[cfg(not(target_os = "windows"))]
2488 use crate::docker::DockerComposeServicePort;
2489 use crate::{
2490 DevContainerConfig, DevContainerContext,
2491 command_json::CommandRunner,
2492 devcontainer_api::DevContainerError,
2493 devcontainer_json::MountDefinition,
2494 devcontainer_manifest::{
2495 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2496 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2497 image_from_dockerfile, resolve_compose_dockerfile,
2498 },
2499 docker::{
2500 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2501 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2502 DockerPs,
2503 },
2504 oci::TokenResponse,
2505 };
2506 #[cfg(not(target_os = "windows"))]
2507 const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2508 #[cfg(target_os = "windows")]
2509 const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2510
2511 async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2512 let buffer = futures::io::Cursor::new(Vec::new());
2513 let mut builder = async_tar::Builder::new(buffer);
2514 for (file_name, content) in content {
2515 if content.is_empty() {
2516 let mut header = async_tar::Header::new_gnu();
2517 header.set_size(0);
2518 header.set_mode(0o755);
2519 header.set_entry_type(async_tar::EntryType::Directory);
2520 header.set_cksum();
2521 builder
2522 .append_data(&mut header, file_name, &[] as &[u8])
2523 .await
2524 .unwrap();
2525 } else {
2526 let data = content.as_bytes();
2527 let mut header = async_tar::Header::new_gnu();
2528 header.set_size(data.len() as u64);
2529 header.set_mode(0o755);
2530 header.set_entry_type(async_tar::EntryType::Regular);
2531 header.set_cksum();
2532 builder
2533 .append_data(&mut header, file_name, data)
2534 .await
2535 .unwrap();
2536 }
2537 }
2538 let buffer = builder.into_inner().await.unwrap();
2539 buffer.into_inner()
2540 }
2541
2542 fn test_project_filename() -> String {
2543 PathBuf::from(TEST_PROJECT_PATH)
2544 .file_name()
2545 .expect("is valid")
2546 .display()
2547 .to_string()
2548 }
2549
    /// Writes `devcontainer_contents` to `.devcontainer/devcontainer.json`
    /// under the fake project root and returns the default `DevContainerConfig`.
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2562
    /// Handles to the fake collaborators backing a manifest under test, kept
    /// so individual tests can inspect or program them after construction.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Not read by tests today (hence the underscore); retained alongside
        // the other fakes for symmetry.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2569
    /// Convenience wrapper around `init_devcontainer_manifest` that uses
    /// freshly constructed fakes and an empty local environment.
    async fn init_default_devcontainer_manifest(
        cx: &mut TestAppContext,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let fs = FakeFs::new(cx.executor());
        let http_client = fake_http_client();
        let command_runner = Arc::new(TestCommandRunner::new());
        let docker = Arc::new(FakeDocker::new());
        let environment = HashMap::new();

        init_devcontainer_manifest(
            cx,
            fs,
            http_client,
            docker,
            command_runner,
            environment,
            devcontainer_contents,
        )
        .await
    }
2591
    /// Builds a `DevContainerManifest` wired to the supplied fakes, after
    /// writing `devcontainer_contents` into the fake filesystem. Returns the
    /// fakes (bundled as `TestDependencies`) alongside the manifest so tests
    /// can inspect or program them.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Minimal gpui entities needed to construct a ProjectEnvironment.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Hand clones of the fakes back to the caller before the manifest
        // takes ownership of them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2634
    // devcontainer.json's `remoteUser` must take precedence over a competing
    // user recorded in the image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Competing user in the image's metadata label.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        // The config's "root" wins, not the metadata's "vsCode".
        assert_eq!(remote_user, "root".to_string())
    }
2674
    // With no `remoteUser` in devcontainer.json, the user recorded in the
    // image's devcontainer metadata label must be used.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2703
2704 #[test]
2705 fn should_extract_feature_id_from_references() {
2706 assert_eq!(
2707 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2708 "aws-cli"
2709 );
2710 assert_eq!(
2711 extract_feature_id("ghcr.io/devcontainers/features/go"),
2712 "go"
2713 );
2714 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2715 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2716 assert_eq!(
2717 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2718 "rust"
2719 );
2720 }
2721
2722 #[gpui::test]
2723 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2724 let mut metadata = HashMap::new();
2725 metadata.insert(
2726 "remoteUser".to_string(),
2727 serde_json_lenient::Value::String("vsCode".to_string()),
2728 );
2729
2730 let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2731 cx,
2732 r#"{
2733 "name": "TODO"
2734 }"#,
2735 )
2736 .await
2737 .unwrap();
2738 let build_resources = DockerBuildResources {
2739 image: DockerInspect {
2740 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2741 config: DockerInspectConfig {
2742 labels: DockerConfigLabels { metadata: None },
2743 image_user: None,
2744 env: Vec::new(),
2745 },
2746 mounts: None,
2747 state: None,
2748 },
2749 additional_mounts: vec![],
2750 privileged: false,
2751 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2752 };
2753 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2754
2755 assert!(docker_run_command.is_ok());
2756 let docker_run_command = docker_run_command.expect("ok");
2757
2758 assert_eq!(docker_run_command.get_program(), "docker");
2759 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2760 .join(".devcontainer")
2761 .join("devcontainer.json");
2762 let expected_config_file_label = expected_config_file_label.display();
2763 assert_eq!(
2764 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2765 vec![
2766 OsStr::new("run"),
2767 OsStr::new("--sig-proxy=false"),
2768 OsStr::new("-d"),
2769 OsStr::new("--mount"),
2770 OsStr::new(&format!(
2771 "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
2772 )),
2773 OsStr::new("-l"),
2774 OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
2775 OsStr::new("-l"),
2776 OsStr::new(&format!(
2777 "devcontainer.config_file={expected_config_file_label}"
2778 )),
2779 OsStr::new("--entrypoint"),
2780 OsStr::new("/bin/sh"),
2781 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2782 OsStr::new("-c"),
2783 OsStr::new(
2784 "
2785 echo Container started
2786 trap \"exit 0\" 15
2787 exec \"$@\"
2788 while sleep 1 & wait $!; do :; done
2789 "
2790 .trim()
2791 ),
2792 OsStr::new("-"),
2793 ]
2794 )
2795 }
2796
    // find_primary_service must fail when no service is named or the named
    // service is missing from the compose config, and succeed otherwise.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2856
    // End-to-end check of non-remote ${...} substitution when the config uses
    // the default workspace mount under /workspaces.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
    "#;
        // The HashMap below is the local environment feeding ${localEnv:...}.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2970
    // Same substitution check as above, but with an explicit workspaceMount /
    // workspaceFolder; the container-side variables must follow the explicit
    // folder while the local-side ones still track the project path.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
        // These are some external comments. serde_lenient should handle them
        {
            // These are some internal comments
            "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
            "name": "myDevContainer-${devcontainerId}",
            "remoteUser": "root",
            "remoteEnv": {
                "DEVCONTAINER_ID": "${devcontainerId}",
                "MYVAR2": "myvarothervalue",
                "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

            },
            "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
            "workspaceFolder": "/workspace/customfolder"
        }
        "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — from the explicit workspaceFolder
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );
    }
3058
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    //
    // End-to-end test for a Dockerfile-based config with two OCI features:
    // verifies the generated Dockerfile.extended and updateUID.Dockerfile, the
    // per-feature install wrapper script, the final `docker run` arguments
    // (mounts, labels, ports, entrypoint), and the remoteEnv applied to execs.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        /*---------------------------------------------------------------------------------------------
         * Copyright (c) Microsoft Corporation. All rights reserved.
         * Licensed under the MIT License. See License.txt in the project root for license information.
         *--------------------------------------------------------------------------------------------*/
        {
            "name": "cli-${devcontainerId}",
            // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VARIANT": "18-bookworm",
                    "FOO": "bar",
                },
            },
            "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
            "workspaceFolder": "/workspace2",
            "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
            ],

            "runArgs": [
                "--cap-add=SYS_PTRACE",
                "--sig-proxy=true",
            ],

            "forwardPorts": [
                8082,
                8083,
            ],
            "appPort": [
                8084,
                "8085:8086",
            ],

            "containerEnv": {
                "VARIABLE_VALUE": "value",
            },

            "initializeCommand": "touch IAM.md",

            "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

            "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

            "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
            },

            "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

            "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

            "remoteUser": "node",

            "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
            },

            "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                    "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
            },

            "customizations": {
                "vscode": {
                    "extensions": [
                        "dbaeumer.vscode-eslint",
                        "GitHub.vscode-pull-request-github",
                    ],
                },
                "zed": {
                    "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                    "repositories": {
                        "devcontainers/features": {
                            "permissions": {
                                "contents": "write",
                                "workflows": "write",
                            },
                        },
                    },
                },
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile referenced by the config's "build" section into
        // the fake filesystem so build_and_run() can read and extend it.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions surface in the result;
        // the vscode/codespaces sections are present in the config but unused here.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The features build writes an extended Dockerfile: original contents,
        // an auto-added stage label, then one RUN block per feature.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // A second generated Dockerfile rewrites the remote user's UID/GID and
        // re-applies the feature/container ENV lines (GOPATH etc. from the go
        // feature, VARIABLE_VALUE from containerEnv).
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Each feature gets a wrapper script; check the go feature's, which
        // should echo its id and resolved options before running install.sh.
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // The recorded `docker run` invocation: runArgs are passed through,
        // --privileged comes from docker-in-docker, mounts cover the workspace,
        // the explicit bash-history mount and the dind volume, labels identify
        // the project/config, and forwardPorts + appPort become -p flags.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every exec into the container carries remoteEnv, with the
        // ${containerEnv:PATH} reference expanded against the container's PATH.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3434
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    //
    // End-to-end test for a docker-compose-based config with two OCI features:
    // verifies the generated Dockerfile.extended and updateUID.Dockerfile, the
    // compose build override (preserving the original build context), and the
    // compose runtime override (entrypoint, labels, dind volume, and
    // forwardPorts mapped onto the "db" service because the "app" service uses
    // network_mode: service:db).
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by "dockerComposeFile" into the
        // fake filesystem; "app" builds from a Dockerfile, "db" uses an image.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile that the "app" service's build section points at.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Extended Dockerfile: original contents, auto-added stage label, then
        // one RUN block per feature (remote user here is 'vscode').
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The compose build override must keep the original build context (".")
        // from docker-compose.yml rather than replacing it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The runtime override: "app" gets the keep-alive entrypoint, dind
        // settings (privileged, /var/lib/docker volume) and devcontainer
        // labels; "db" receives the forwarded ports.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3756
3757 #[test]
3758 fn test_resolve_compose_dockerfile() {
3759 let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3760
3761 // Bug case (#53473): context ".." with relative dockerfile
3762 assert_eq!(
3763 resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3764 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3765 );
3766
3767 // Compose path containing ".." (as docker_compose_manifest() produces)
3768 assert_eq!(
3769 resolve_compose_dockerfile(
3770 Path::new("/project/.devcontainer/../docker-compose.yml"),
3771 Some("."),
3772 "docker/Dockerfile",
3773 ),
3774 Some(PathBuf::from("/project/docker/Dockerfile")),
3775 );
3776
3777 // Absolute dockerfile returned as-is
3778 assert_eq!(
3779 resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3780 Some(PathBuf::from("/absolute/Dockerfile")),
3781 );
3782
3783 // Absolute context used directly
3784 assert_eq!(
3785 resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3786 Some(PathBuf::from("/abs/context/Dockerfile")),
3787 );
3788
3789 // No context defaults to compose file's directory
3790 assert_eq!(
3791 resolve_compose_dockerfile(compose, None, "Dockerfile"),
3792 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3793 );
3794 }
3795
3796 #[gpui::test]
3797 async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
3798 cx.executor().allow_parking();
3799 env_logger::try_init().ok();
3800
3801 let given_devcontainer_contents = r#"
3802 {
3803 "name": "Test",
3804 "dockerComposeFile": "docker-compose-context-parent.yml",
3805 "service": "app",
3806 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
3807 }
3808 "#;
3809 let (_, mut devcontainer_manifest) =
3810 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3811 .await
3812 .unwrap();
3813
3814 devcontainer_manifest.parse_nonremote_vars().unwrap();
3815
3816 let expected = PathBuf::from(TEST_PROJECT_PATH)
3817 .join(".devcontainer")
3818 .join("Dockerfile");
3819 assert_eq!(
3820 devcontainer_manifest.dockerfile_location().await,
3821 Some(expected)
3822 );
3823 }
3824
3825 #[gpui::test]
3826 async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3827 cx: &mut TestAppContext,
3828 ) {
3829 cx.executor().allow_parking();
3830 env_logger::try_init().ok();
3831 let given_devcontainer_contents = r#"
3832 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3833 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3834 {
3835 "features": {
3836 "ghcr.io/devcontainers/features/aws-cli:1": {},
3837 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3838 },
3839 "name": "Rust and PostgreSQL",
3840 "dockerComposeFile": "docker-compose.yml",
3841 "service": "app",
3842 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3843
3844 // Features to add to the dev container. More info: https://containers.dev/features.
3845 // "features": {},
3846
3847 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3848 "forwardPorts": [
3849 8083,
3850 "db:5432",
3851 "db:1234",
3852 ],
3853 "updateRemoteUserUID": false,
3854 "appPort": "8084",
3855
3856 // Use 'postCreateCommand' to run commands after the container is created.
3857 // "postCreateCommand": "rustc --version",
3858
3859 // Configure tool-specific properties.
3860 // "customizations": {},
3861
3862 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3863 // "remoteUser": "root"
3864 }
3865 "#;
3866 let (test_dependencies, mut devcontainer_manifest) =
3867 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3868 .await
3869 .unwrap();
3870
3871 test_dependencies
3872 .fs
3873 .atomic_write(
3874 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3875 r#"
3876version: '3.8'
3877
3878volumes:
3879postgres-data:
3880
3881services:
3882app:
3883 build:
3884 context: .
3885 dockerfile: Dockerfile
3886 env_file:
3887 # Ensure that the variables in .env match the same variables in devcontainer.json
3888 - .env
3889
3890 volumes:
3891 - ../..:/workspaces:cached
3892
3893 # Overrides default command so things don't shut down after the process ends.
3894 command: sleep infinity
3895
3896 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3897 network_mode: service:db
3898
3899 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3900 # (Adding the "ports" property to this file will not forward from a Codespace.)
3901
3902db:
3903 image: postgres:14.1
3904 restart: unless-stopped
3905 volumes:
3906 - postgres-data:/var/lib/postgresql/data
3907 env_file:
3908 # Ensure that the variables in .env match the same variables in devcontainer.json
3909 - .env
3910
3911 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3912 # (Adding the "ports" property to this file will not forward from a Codespace.)
3913 "#.trim().to_string(),
3914 )
3915 .await
3916 .unwrap();
3917
3918 test_dependencies.fs.atomic_write(
3919 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3920 r#"
3921FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3922
3923# Include lld linker to improve build times either by using environment variable
3924# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3925RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3926&& apt-get -y install clang lld \
3927&& apt-get autoremove -y && apt-get clean -y
3928 "#.trim().to_string()).await.unwrap();
3929
3930 devcontainer_manifest.parse_nonremote_vars().unwrap();
3931
3932 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3933
3934 let files = test_dependencies.fs.files();
3935 let feature_dockerfile = files
3936 .iter()
3937 .find(|f| {
3938 f.file_name()
3939 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3940 })
3941 .expect("to be found");
3942 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3943 assert_eq!(
3944 &feature_dockerfile,
3945 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3946
3947FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3948
3949# Include lld linker to improve build times either by using environment variable
3950# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3951RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3952&& apt-get -y install clang lld \
3953&& apt-get autoremove -y && apt-get clean -y
3954FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3955
3956FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3957USER root
3958COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3959RUN chmod -R 0755 /tmp/build-features/
3960
3961FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3962
3963USER root
3964
3965RUN mkdir -p /tmp/dev-container-features
3966COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3967
3968RUN \
3969echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3970echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3971
3972
3973RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3974cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3975&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3976&& cd /tmp/dev-container-features/aws-cli_0 \
3977&& chmod +x ./devcontainer-features-install.sh \
3978&& ./devcontainer-features-install.sh \
3979&& rm -rf /tmp/dev-container-features/aws-cli_0
3980
3981RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3982cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3983&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3984&& cd /tmp/dev-container-features/docker-in-docker_1 \
3985&& chmod +x ./devcontainer-features-install.sh \
3986&& ./devcontainer-features-install.sh \
3987&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3988
3989
3990ARG _DEV_CONTAINERS_IMAGE_USER=root
3991USER $_DEV_CONTAINERS_IMAGE_USER
3992
3993# Ensure that /etc/profile does not clobber the existing path
3994RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3995
3996
3997ENV DOCKER_BUILDKIT=1
3998"#
3999 );
4000 }
4001
 // Docker Compose + podman path: FakeDocker is flagged as podman, so the
 // generated "Dockerfile.extended" must stage feature content with
 // `COPY --from=...` (see the expected output below) instead of the
 // BuildKit-only `RUN --mount=type=bind` form used in the docker variant of
 // this test, and an "updateUID.Dockerfile" must also be written since the
 // config does not set "updateRemoteUserUID": false.
 #[cfg(not(target_os = "windows"))]
 #[gpui::test]
 async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 // devcontainer.json referencing a compose service ("app") plus two OCI features.
 let given_devcontainer_contents = r#"
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
 {
 "features": {
 "ghcr.io/devcontainers/features/aws-cli:1": {},
 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
 },
 "name": "Rust and PostgreSQL",
 "dockerComposeFile": "docker-compose.yml",
 "service": "app",
 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

 // Features to add to the dev container. More info: https://containers.dev/features.
 // "features": {},

 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 // "forwardPorts": [5432],

 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "rustc --version",

 // Configure tool-specific properties.
 // "customizations": {},

 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
 // "remoteUser": "root"
 }
 "#;
 // Build the manifest with a docker client that reports itself as podman.
 let mut fake_docker = FakeDocker::new();
 fake_docker.set_podman(true);
 let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
 cx,
 FakeFs::new(cx.executor()),
 fake_http_client(),
 Arc::new(fake_docker),
 Arc::new(TestCommandRunner::new()),
 HashMap::new(),
 given_devcontainer_contents,
 )
 .await
 .unwrap();

 // Write the compose file referenced by "dockerComposeFile" above.
 test_dependencies
 .fs
 .atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
 r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
 context: .
 dockerfile: Dockerfile
env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

volumes:
 - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
 - postgres-data:/var/lib/postgresql/data
env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
 "#.trim().to_string(),
 )
 .await
 .unwrap();

 // Dockerfile used by the "app" service's build section.
 test_dependencies.fs.atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
 r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
 "#.trim().to_string()).await.unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

 let files = test_dependencies.fs.files();

 // Locate the extended Dockerfile generated for installing features.
 let feature_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
 })
 .expect("to be found");
 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
 // With podman, features are staged via COPY --from (no RUN --mount, which
 // requires BuildKit), and an extra dev_containers_feature_content_source
 // alias stage is emitted.
 assert_eq!(
 &feature_dockerfile,
 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
 );

 // updateUID.Dockerfile remaps the remote user's UID/GID to the host's at
 // build time (emitted because "updateRemoteUserUID" is not disabled here).
 let uid_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
 })
 .expect("to be found");
 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

 assert_eq!(
 &uid_dockerfile,
 r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
 );
 }
4228
4229 #[gpui::test]
4230 async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4231 cx.executor().allow_parking();
4232 env_logger::try_init().ok();
4233 let given_devcontainer_contents = r#"
4234 /*---------------------------------------------------------------------------------------------
4235 * Copyright (c) Microsoft Corporation. All rights reserved.
4236 * Licensed under the MIT License. See License.txt in the project root for license information.
4237 *--------------------------------------------------------------------------------------------*/
4238 {
4239 "name": "cli-${devcontainerId}",
4240 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4241 "build": {
4242 "dockerfile": "Dockerfile",
4243 "args": {
4244 "VARIANT": "18-bookworm",
4245 "FOO": "bar",
4246 },
4247 "target": "development",
4248 },
4249 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4250 "workspaceFolder": "/workspace2",
4251 "mounts": [
4252 // Keep command history across instances
4253 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4254 ],
4255
4256 "forwardPorts": [
4257 8082,
4258 8083,
4259 ],
4260 "appPort": "8084",
4261 "updateRemoteUserUID": false,
4262
4263 "containerEnv": {
4264 "VARIABLE_VALUE": "value",
4265 },
4266
4267 "initializeCommand": "touch IAM.md",
4268
4269 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4270
4271 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4272
4273 "postCreateCommand": {
4274 "yarn": "yarn install",
4275 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4276 },
4277
4278 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4279
4280 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4281
4282 "remoteUser": "node",
4283
4284 "remoteEnv": {
4285 "PATH": "${containerEnv:PATH}:/some/other/path",
4286 "OTHER_ENV": "other_env_value"
4287 },
4288
4289 "features": {
4290 "ghcr.io/devcontainers/features/docker-in-docker:2": {
4291 "moby": false,
4292 },
4293 "ghcr.io/devcontainers/features/go:1": {},
4294 },
4295
4296 "customizations": {
4297 "vscode": {
4298 "extensions": [
4299 "dbaeumer.vscode-eslint",
4300 "GitHub.vscode-pull-request-github",
4301 ],
4302 },
4303 "zed": {
4304 "extensions": ["vue", "ruby"],
4305 },
4306 "codespaces": {
4307 "repositories": {
4308 "devcontainers/features": {
4309 "permissions": {
4310 "contents": "write",
4311 "workflows": "write",
4312 },
4313 },
4314 },
4315 },
4316 },
4317 }
4318 "#;
4319
4320 let (test_dependencies, mut devcontainer_manifest) =
4321 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4322 .await
4323 .unwrap();
4324
4325 test_dependencies
4326 .fs
4327 .atomic_write(
4328 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4329 r#"
4330# Copyright (c) Microsoft Corporation. All rights reserved.
4331# Licensed under the MIT License. See License.txt in the project root for license information.
4332ARG VARIANT="16-bullseye"
4333FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4334FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4335
4336RUN mkdir -p /workspaces && chown node:node /workspaces
4337
4338ARG USERNAME=node
4339USER $USERNAME
4340
4341# Save command line history
4342RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4343&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4344&& mkdir -p /home/$USERNAME/commandhistory \
4345&& touch /home/$USERNAME/commandhistory/.bash_history \
4346&& chown -R $USERNAME /home/$USERNAME/commandhistory
4347 "#.trim().to_string(),
4348 )
4349 .await
4350 .unwrap();
4351
4352 devcontainer_manifest.parse_nonremote_vars().unwrap();
4353
4354 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4355
4356 assert_eq!(
4357 devcontainer_up.extension_ids,
4358 vec!["vue".to_string(), "ruby".to_string()]
4359 );
4360
4361 let files = test_dependencies.fs.files();
4362 let feature_dockerfile = files
4363 .iter()
4364 .find(|f| {
4365 f.file_name()
4366 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4367 })
4368 .expect("to be found");
4369 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4370 assert_eq!(
4371 &feature_dockerfile,
4372 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4373
4374# Copyright (c) Microsoft Corporation. All rights reserved.
4375# Licensed under the MIT License. See License.txt in the project root for license information.
4376ARG VARIANT="16-bullseye"
4377FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4378FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4379
4380RUN mkdir -p /workspaces && chown node:node /workspaces
4381
4382ARG USERNAME=node
4383USER $USERNAME
4384
4385# Save command line history
4386RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4387&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4388&& mkdir -p /home/$USERNAME/commandhistory \
4389&& touch /home/$USERNAME/commandhistory/.bash_history \
4390&& chown -R $USERNAME /home/$USERNAME/commandhistory
4391FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4392
4393FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4394USER root
4395COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4396RUN chmod -R 0755 /tmp/build-features/
4397
4398FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4399
4400USER root
4401
4402RUN mkdir -p /tmp/dev-container-features
4403COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4404
4405RUN \
4406echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4407echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4408
4409
4410RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4411cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4412&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4413&& cd /tmp/dev-container-features/docker-in-docker_0 \
4414&& chmod +x ./devcontainer-features-install.sh \
4415&& ./devcontainer-features-install.sh \
4416&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4417
4418RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4419cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4420&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4421&& cd /tmp/dev-container-features/go_1 \
4422&& chmod +x ./devcontainer-features-install.sh \
4423&& ./devcontainer-features-install.sh \
4424&& rm -rf /tmp/dev-container-features/go_1
4425
4426
4427ARG _DEV_CONTAINERS_IMAGE_USER=root
4428USER $_DEV_CONTAINERS_IMAGE_USER
4429
4430# Ensure that /etc/profile does not clobber the existing path
4431RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4432
4433ENV DOCKER_BUILDKIT=1
4434
4435ENV GOPATH=/go
4436ENV GOROOT=/usr/local/go
4437ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4438ENV VARIABLE_VALUE=value
4439"#
4440 );
4441
4442 let golang_install_wrapper = files
4443 .iter()
4444 .find(|f| {
4445 f.file_name()
4446 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4447 && f.to_str().is_some_and(|s| s.contains("go_"))
4448 })
4449 .expect("to be found");
4450 let golang_install_wrapper = test_dependencies
4451 .fs
4452 .load(golang_install_wrapper)
4453 .await
4454 .unwrap();
4455 assert_eq!(
4456 &golang_install_wrapper,
4457 r#"#!/bin/sh
4458set -e
4459
4460on_exit () {
4461 [ $? -eq 0 ] && exit
4462 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4463}
4464
4465trap on_exit EXIT
4466
4467echo ===========================================================================
4468echo 'Feature : go'
4469echo 'Id : ghcr.io/devcontainers/features/go:1'
4470echo 'Options :'
4471echo ' GOLANGCILINTVERSION=latest
4472 VERSION=latest'
4473echo ===========================================================================
4474
4475set -a
4476. ../devcontainer-features.builtin.env
4477. ./devcontainer-features.env
4478set +a
4479
4480chmod +x ./install.sh
4481./install.sh
4482"#
4483 );
4484
4485 let docker_commands = test_dependencies
4486 .command_runner
4487 .commands_by_program("docker");
4488
4489 let docker_run_command = docker_commands
4490 .iter()
4491 .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4492
4493 assert!(docker_run_command.is_some());
4494
4495 let docker_exec_commands = test_dependencies
4496 .docker
4497 .exec_commands_recorded
4498 .lock()
4499 .unwrap();
4500
4501 assert!(docker_exec_commands.iter().all(|exec| {
4502 exec.env
4503 == HashMap::from([
4504 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4505 (
4506 "PATH".to_string(),
4507 "/initial/path:/some/other/path".to_string(),
4508 ),
4509 ])
4510 }))
4511 }
4512
 // Plain "image" config (no Dockerfile, no compose): on non-Windows hosts an
 // "updateUID.Dockerfile" is still generated so the remote user's UID/GID can
 // be remapped to the host's at build time.
 #[cfg(not(target_os = "windows"))]
 #[gpui::test]
 async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "image": "test_image:latest",
 }
 "#;

 let (test_dependencies, mut devcontainer_manifest) =
 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
 .await
 .unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

 // The UID-remapping Dockerfile must have been written to the fake fs.
 let files = test_dependencies.fs.files();
 let uid_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
 })
 .expect("to be found");
 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

 assert_eq!(
 &uid_dockerfile,
 r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
 );
 }
4587
4588 #[cfg(target_os = "windows")]
4589 #[gpui::test]
4590 async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4591 cx.executor().allow_parking();
4592 env_logger::try_init().ok();
4593 let given_devcontainer_contents = r#"
4594 {
4595 "name": "cli-${devcontainerId}",
4596 "image": "test_image:latest",
4597 }
4598 "#;
4599
4600 let (_, mut devcontainer_manifest) =
4601 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4602 .await
4603 .unwrap();
4604
4605 devcontainer_manifest.parse_nonremote_vars().unwrap();
4606
4607 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4608
4609 assert_eq!(
4610 devcontainer_up.remote_workspace_folder,
4611 "/workspaces/project"
4612 );
4613 }
4614
 // Docker Compose config whose service uses a prebuilt "image" (no build
 // section): an "updateUID.Dockerfile" must still be generated so the remote
 // user's UID/GID can be remapped at build time.
 #[cfg(not(target_os = "windows"))]
 #[gpui::test]
 async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "dockerComposeFile": "docker-compose-plain.yml",
 "service": "app",
 }
 "#;

 let (test_dependencies, mut devcontainer_manifest) =
 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
 .await
 .unwrap();

 // Compose file referenced by "dockerComposeFile": a single image-based
 // service with no build section.
 test_dependencies
 .fs
 .atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
 r#"
services:
 app:
 image: test_image:latest
 command: sleep infinity
 volumes:
 - ..:/workspace:cached
 "#
 .trim()
 .to_string(),
 )
 .await
 .unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

 // The UID-remapping Dockerfile must have been written to the fake fs.
 let files = test_dependencies.fs.files();
 let uid_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
 })
 .expect("to be found");
 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

 assert_eq!(
 &uid_dockerfile,
 r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
 );
 }
4708
4709 #[gpui::test]
4710 async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4711 cx.executor().allow_parking();
4712 env_logger::try_init().ok();
4713 let given_devcontainer_contents = r#"
4714 {
4715 "name": "cli-${devcontainerId}",
4716 "build": {
4717 "dockerfile": "Dockerfile",
4718 "args": {
4719 "VERSION": "1.22",
4720 }
4721 },
4722 }
4723 "#;
4724
4725 let (test_dependencies, mut devcontainer_manifest) =
4726 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4727 .await
4728 .unwrap();
4729
4730 test_dependencies
4731 .fs
4732 .atomic_write(
4733 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4734 r#"
4735FROM dontgrabme as build_context
4736ARG VERSION=1.21
4737ARG REPOSITORY=mybuild
4738ARG REGISTRY=docker.io/stuff
4739
4740ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4741
4742FROM ${IMAGE} AS devcontainer
4743 "#
4744 .trim()
4745 .to_string(),
4746 )
4747 .await
4748 .unwrap();
4749
4750 devcontainer_manifest.parse_nonremote_vars().unwrap();
4751
4752 let dockerfile_contents = devcontainer_manifest
4753 .expanded_dockerfile_content()
4754 .await
4755 .unwrap();
4756 let base_image = image_from_dockerfile(
4757 dockerfile_contents,
4758 &devcontainer_manifest
4759 .dev_container()
4760 .build
4761 .as_ref()
4762 .and_then(|b| b.target.clone()),
4763 )
4764 .unwrap();
4765
4766 assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4767 }
4768
 // Like test_gets_base_image_from_dockerfile, but with "target":
 // "development" in the build config: the base image must come from the
 // "development" stage's FROM (DEV_IMAGE -> ":latest"), not from the final
 // "production" stage.
 #[gpui::test]
 async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "build": {
 "dockerfile": "Dockerfile",
 "args": {
 "VERSION": "1.22",
 },
 "target": "development"
 },
 }
 "#;

 let (test_dependencies, mut devcontainer_manifest) =
 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
 .await
 .unwrap();

 // Multi-stage Dockerfile: "development" uses DEV_IMAGE (tag :latest),
 // "production" uses IMAGE (tag from VERSION).
 test_dependencies
 .fs
 .atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
 r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest

FROM ${DEV_IMAGE} AS development
FROM ${IMAGE} AS production
 "#
 .trim()
 .to_string(),
 )
 .await
 .unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let dockerfile_contents = devcontainer_manifest
 .expanded_dockerfile_content()
 .await
 .unwrap();
 let base_image = image_from_dockerfile(
 dockerfile_contents,
 &devcontainer_manifest
 .dev_container()
 .build
 .as_ref()
 .and_then(|b| b.target.clone()),
 )
 .unwrap();

 assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
 }
4831
    // Verifies ARG expansion inside a Dockerfile: build args from
    // devcontainer.json override Dockerfile defaults, forward references stay
    // unexpanded, and ARGs may reference earlier ARGs (including JSON-ish
    // values with braces/quotes).
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Build args supplied via devcontainer.json; ELIXIR_VERSION should
        // override the Dockerfile's own default of 1.20.0-rc.4.
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile exercising the tricky expansion cases: a forward
        // reference (used before its ARG is declared), two assignments on one
        // ARG line, chained references, and values containing quotes/braces.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Expected: the forward reference is left as-is; FOOBAR, IMAGE,
        // WRAPPING_MAP and FROM_JSON are expanded; ELIXIR_VERSION resolves to
        // the devcontainer.json value (1.21) in downstream expansions.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
            "#
            .trim()
        )
    }
4904
    // TODO(review): empty placeholder — no aliasing behavior is asserted yet.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4907
    // TODO(review): empty placeholder — no aliasing behavior is asserted yet.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4910
    // TODO(review): empty placeholder — no aliasing behavior is asserted yet.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4913
    /// One `docker exec` request captured by the fake docker client, so tests
    /// can assert on what would have been executed inside the container.
    pub(crate) struct RecordedExecCommand {
        // Underscore-prefixed fields are captured for completeness but not
        // read by the current assertions.
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment that would be passed to the exec'd process.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4921
    /// In-memory `DockerClient` test double: serves canned inspect/compose
    /// fixtures and records exec invocations instead of running them.
    pub(crate) struct FakeDocker {
        // Exec requests in call order; behind a Mutex because the trait's
        // methods take `&self`.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true the fake reports itself as podman.
        podman: bool,
        // Whether buildx is reported as available.
        has_buildx: bool,
    }
4927
4928 impl FakeDocker {
4929 pub(crate) fn new() -> Self {
4930 Self {
4931 podman: false,
4932 has_buildx: true,
4933 exec_commands_recorded: Mutex::new(Vec::new()),
4934 }
4935 }
4936 #[cfg(not(target_os = "windows"))]
4937 fn set_podman(&mut self, podman: bool) {
4938 self.podman = podman;
4939 }
4940 }
4941
4942 #[async_trait]
4943 impl DockerClient for FakeDocker {
4944 async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
4945 if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
4946 return Ok(DockerInspect {
4947 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4948 .to_string(),
4949 config: DockerInspectConfig {
4950 labels: DockerConfigLabels {
4951 metadata: Some(vec![HashMap::from([(
4952 "remoteUser".to_string(),
4953 Value::String("node".to_string()),
4954 )])]),
4955 },
4956 env: Vec::new(),
4957 image_user: Some("root".to_string()),
4958 },
4959 mounts: None,
4960 state: None,
4961 });
4962 }
4963 if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
4964 return Ok(DockerInspect {
4965 id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
4966 .to_string(),
4967 config: DockerInspectConfig {
4968 labels: DockerConfigLabels {
4969 metadata: Some(vec![HashMap::from([(
4970 "remoteUser".to_string(),
4971 Value::String("vscode".to_string()),
4972 )])]),
4973 },
4974 image_user: Some("root".to_string()),
4975 env: Vec::new(),
4976 },
4977 mounts: None,
4978 state: None,
4979 });
4980 }
4981 if id.starts_with("cli_") {
4982 return Ok(DockerInspect {
4983 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4984 .to_string(),
4985 config: DockerInspectConfig {
4986 labels: DockerConfigLabels {
4987 metadata: Some(vec![HashMap::from([(
4988 "remoteUser".to_string(),
4989 Value::String("node".to_string()),
4990 )])]),
4991 },
4992 image_user: Some("root".to_string()),
4993 env: vec!["PATH=/initial/path".to_string()],
4994 },
4995 mounts: None,
4996 state: None,
4997 });
4998 }
4999 if id == "found_docker_ps" {
5000 return Ok(DockerInspect {
5001 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
5002 .to_string(),
5003 config: DockerInspectConfig {
5004 labels: DockerConfigLabels {
5005 metadata: Some(vec![HashMap::from([(
5006 "remoteUser".to_string(),
5007 Value::String("node".to_string()),
5008 )])]),
5009 },
5010 image_user: Some("root".to_string()),
5011 env: vec!["PATH=/initial/path".to_string()],
5012 },
5013 mounts: Some(vec![DockerInspectMount {
5014 source: "/path/to/local/project".to_string(),
5015 destination: "/workspaces/project".to_string(),
5016 }]),
5017 state: None,
5018 });
5019 }
5020 if id.starts_with("rust_a-") {
5021 return Ok(DockerInspect {
5022 id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
5023 .to_string(),
5024 config: DockerInspectConfig {
5025 labels: DockerConfigLabels {
5026 metadata: Some(vec![HashMap::from([(
5027 "remoteUser".to_string(),
5028 Value::String("vscode".to_string()),
5029 )])]),
5030 },
5031 image_user: Some("root".to_string()),
5032 env: Vec::new(),
5033 },
5034 mounts: None,
5035 state: None,
5036 });
5037 }
5038 if id == "test_image:latest" {
5039 return Ok(DockerInspect {
5040 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5041 .to_string(),
5042 config: DockerInspectConfig {
5043 labels: DockerConfigLabels {
5044 metadata: Some(vec![HashMap::from([(
5045 "remoteUser".to_string(),
5046 Value::String("node".to_string()),
5047 )])]),
5048 },
5049 env: Vec::new(),
5050 image_user: Some("root".to_string()),
5051 },
5052 mounts: None,
5053 state: None,
5054 });
5055 }
5056
5057 Err(DevContainerError::DockerNotAvailable)
5058 }
5059 async fn get_docker_compose_config(
5060 &self,
5061 config_files: &Vec<PathBuf>,
5062 ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
5063 let project_path = PathBuf::from(TEST_PROJECT_PATH);
5064 if config_files.len() == 1
5065 && config_files.get(0)
5066 == Some(
5067 &project_path
5068 .join(".devcontainer")
5069 .join("docker-compose.yml"),
5070 )
5071 {
5072 return Ok(Some(DockerComposeConfig {
5073 name: None,
5074 services: HashMap::from([
5075 (
5076 "app".to_string(),
5077 DockerComposeService {
5078 build: Some(DockerComposeServiceBuild {
5079 context: Some(".".to_string()),
5080 dockerfile: Some("Dockerfile".to_string()),
5081 args: None,
5082 additional_contexts: None,
5083 target: None,
5084 }),
5085 volumes: vec![MountDefinition {
5086 source: Some("../..".to_string()),
5087 target: "/workspaces".to_string(),
5088 mount_type: Some("bind".to_string()),
5089 }],
5090 network_mode: Some("service:db".to_string()),
5091 ..Default::default()
5092 },
5093 ),
5094 (
5095 "db".to_string(),
5096 DockerComposeService {
5097 image: Some("postgres:14.1".to_string()),
5098 volumes: vec![MountDefinition {
5099 source: Some("postgres-data".to_string()),
5100 target: "/var/lib/postgresql/data".to_string(),
5101 mount_type: Some("volume".to_string()),
5102 }],
5103 env_file: Some(vec![".env".to_string()]),
5104 ..Default::default()
5105 },
5106 ),
5107 ]),
5108 volumes: HashMap::from([(
5109 "postgres-data".to_string(),
5110 DockerComposeVolume::default(),
5111 )]),
5112 }));
5113 }
5114 if config_files.len() == 1
5115 && config_files.get(0)
5116 == Some(
5117 &project_path
5118 .join(".devcontainer")
5119 .join("docker-compose-context-parent.yml"),
5120 )
5121 {
5122 return Ok(Some(DockerComposeConfig {
5123 name: None,
5124 services: HashMap::from([(
5125 "app".to_string(),
5126 DockerComposeService {
5127 build: Some(DockerComposeServiceBuild {
5128 context: Some("..".to_string()),
5129 dockerfile: Some(
5130 PathBuf::from(".devcontainer")
5131 .join("Dockerfile")
5132 .display()
5133 .to_string(),
5134 ),
5135 args: None,
5136 additional_contexts: None,
5137 target: None,
5138 }),
5139 ..Default::default()
5140 },
5141 )]),
5142 volumes: HashMap::new(),
5143 }));
5144 }
5145 if config_files.len() == 1
5146 && config_files.get(0)
5147 == Some(
5148 &project_path
5149 .join(".devcontainer")
5150 .join("docker-compose-plain.yml"),
5151 )
5152 {
5153 return Ok(Some(DockerComposeConfig {
5154 name: None,
5155 services: HashMap::from([(
5156 "app".to_string(),
5157 DockerComposeService {
5158 image: Some("test_image:latest".to_string()),
5159 command: vec!["sleep".to_string(), "infinity".to_string()],
5160 ..Default::default()
5161 },
5162 )]),
5163 ..Default::default()
5164 }));
5165 }
5166 Err(DevContainerError::DockerNotAvailable)
5167 }
5168 async fn docker_compose_build(
5169 &self,
5170 _config_files: &Vec<PathBuf>,
5171 _project_name: &str,
5172 ) -> Result<(), DevContainerError> {
5173 Ok(())
5174 }
5175 async fn run_docker_exec(
5176 &self,
5177 container_id: &str,
5178 remote_folder: &str,
5179 user: &str,
5180 env: &HashMap<String, String>,
5181 inner_command: Command,
5182 ) -> Result<(), DevContainerError> {
5183 let mut record = self
5184 .exec_commands_recorded
5185 .lock()
5186 .expect("should be available");
5187 record.push(RecordedExecCommand {
5188 _container_id: container_id.to_string(),
5189 _remote_folder: remote_folder.to_string(),
5190 _user: user.to_string(),
5191 env: env.clone(),
5192 _inner_command: inner_command,
5193 });
5194 Ok(())
5195 }
5196 async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
5197 Err(DevContainerError::DockerNotAvailable)
5198 }
5199 async fn find_process_by_filters(
5200 &self,
5201 _filters: Vec<String>,
5202 ) -> Result<Option<DockerPs>, DevContainerError> {
5203 Ok(Some(DockerPs {
5204 id: "found_docker_ps".to_string(),
5205 }))
5206 }
5207 fn supports_compose_buildkit(&self) -> bool {
5208 !self.podman && self.has_buildx
5209 }
5210 fn docker_cli(&self) -> String {
5211 if self.podman {
5212 "podman".to_string()
5213 } else {
5214 "docker".to_string()
5215 }
5216 }
5217 }
5218
    /// Program name and arguments of one command captured by the test command
    /// runner.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5224
    /// `CommandRunner` test double that records each command instead of
    /// spawning a process.
    pub(crate) struct TestCommandRunner {
        // Recorded commands in run order; Mutex because recording happens
        // through `&self`.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5228
5229 impl TestCommandRunner {
5230 fn new() -> Self {
5231 Self {
5232 commands_recorded: Mutex::new(Vec::new()),
5233 }
5234 }
5235
5236 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5237 let record = self.commands_recorded.lock().expect("poisoned");
5238 record
5239 .iter()
5240 .filter(|r| r.program == program)
5241 .map(|r| r.clone())
5242 .collect()
5243 }
5244 }
5245
5246 #[async_trait]
5247 impl CommandRunner for TestCommandRunner {
5248 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5249 let mut record = self.commands_recorded.lock().expect("poisoned");
5250
5251 record.push(TestCommand {
5252 program: command.get_program().display().to_string(),
5253 args: command
5254 .get_args()
5255 .map(|a| a.display().to_string())
5256 .collect(),
5257 });
5258
5259 Ok(Output {
5260 status: ExitStatus::default(),
5261 stdout: vec![],
5262 stderr: vec![],
5263 })
5264 }
5265 }
5266
5267 fn fake_http_client() -> Arc<dyn HttpClient> {
5268 FakeHttpClient::create(|request| async move {
5269 let (parts, _body) = request.into_parts();
5270 if parts.uri.path() == "/token" {
5271 let token_response = TokenResponse {
5272 token: "token".to_string(),
5273 };
5274 return Ok(http::Response::builder()
5275 .status(200)
5276 .body(http_client::AsyncBody::from(
5277 serde_json_lenient::to_string(&token_response).unwrap(),
5278 ))
5279 .unwrap());
5280 }
5281
5282 // OCI specific things
5283 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5284 let response = r#"
5285 {
5286 "schemaVersion": 2,
5287 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5288 "config": {
5289 "mediaType": "application/vnd.devcontainers",
5290 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5291 "size": 2
5292 },
5293 "layers": [
5294 {
5295 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5296 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5297 "size": 59392,
5298 "annotations": {
5299 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5300 }
5301 }
5302 ],
5303 "annotations": {
5304 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5305 "com.github.package.type": "devcontainer_feature"
5306 }
5307 }
5308 "#;
5309 return Ok(http::Response::builder()
5310 .status(200)
5311 .body(http_client::AsyncBody::from(response))
5312 .unwrap());
5313 }
5314
5315 if parts.uri.path()
5316 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5317 {
5318 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5323 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5324 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5325 ```
5326 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5327 ```
5328 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5329
5330
5331 ## OS Support
5332
5333 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5334
5335 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5336
5337 `bash` is required to execute the `install.sh` script."#),
5338 ("./README.md", r#"
5339 # Docker (Docker-in-Docker) (docker-in-docker)
5340
5341 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5342
5343 ## Example Usage
5344
5345 ```json
5346 "features": {
5347 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5348 }
5349 ```
5350
5351 ## Options
5352
5353 | Options Id | Description | Type | Default Value |
5354 |-----|-----|-----|-----|
5355 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5356 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5357 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5358 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5359 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5360 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5361 | installDockerBuildx | Install Docker Buildx | boolean | true |
5362 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5363 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5364
5365 ## Customizations
5366
5367 ### VS Code Extensions
5368
5369 - `ms-azuretools.vscode-containers`
5370
5371 ## Limitations
5372
5373 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5374 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5375 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5376 ```
5377 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5378 ```
5379 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5380
5381
5382 ## OS Support
5383
5384 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5385
5386 `bash` is required to execute the `install.sh` script.
5387
5388
5389 ---
5390
5391 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
5392 ("./devcontainer-feature.json", r#"
5393 {
5394 "id": "docker-in-docker",
5395 "version": "2.16.1",
5396 "name": "Docker (Docker-in-Docker)",
5397 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5398 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5399 "options": {
5400 "version": {
5401 "type": "string",
5402 "proposals": [
5403 "latest",
5404 "none",
5405 "20.10"
5406 ],
5407 "default": "latest",
5408 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5409 },
5410 "moby": {
5411 "type": "boolean",
5412 "default": true,
5413 "description": "Install OSS Moby build instead of Docker CE"
5414 },
5415 "mobyBuildxVersion": {
5416 "type": "string",
5417 "default": "latest",
5418 "description": "Install a specific version of moby-buildx when using Moby"
5419 },
5420 "dockerDashComposeVersion": {
5421 "type": "string",
5422 "enum": [
5423 "none",
5424 "v1",
5425 "v2"
5426 ],
5427 "default": "v2",
5428 "description": "Default version of Docker Compose (v1, v2 or none)"
5429 },
5430 "azureDnsAutoDetection": {
5431 "type": "boolean",
5432 "default": true,
5433 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5434 },
5435 "dockerDefaultAddressPool": {
5436 "type": "string",
5437 "default": "",
5438 "proposals": [],
5439 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5440 },
5441 "installDockerBuildx": {
5442 "type": "boolean",
5443 "default": true,
5444 "description": "Install Docker Buildx"
5445 },
5446 "installDockerComposeSwitch": {
5447 "type": "boolean",
5448 "default": false,
5449 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5450 },
5451 "disableIp6tables": {
5452 "type": "boolean",
5453 "default": false,
5454 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5455 }
5456 },
5457 "entrypoint": "/usr/local/share/docker-init.sh",
5458 "privileged": true,
5459 "containerEnv": {
5460 "DOCKER_BUILDKIT": "1"
5461 },
5462 "customizations": {
5463 "vscode": {
5464 "extensions": [
5465 "ms-azuretools.vscode-containers"
5466 ],
5467 "settings": {
5468 "github.copilot.chat.codeGeneration.instructions": [
5469 {
5470 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5471 }
5472 ]
5473 }
5474 }
5475 },
5476 "mounts": [
5477 {
5478 "source": "dind-var-lib-docker-${devcontainerId}",
5479 "target": "/var/lib/docker",
5480 "type": "volume"
5481 }
5482 ],
5483 "installsAfter": [
5484 "ghcr.io/devcontainers/features/common-utils"
5485 ]
5486 }"#),
5487 ("./install.sh", r#"
5488 #!/usr/bin/env bash
5489 #-------------------------------------------------------------------------------------------------------------
5490 # Copyright (c) Microsoft Corporation. All rights reserved.
5491 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5492 #-------------------------------------------------------------------------------------------------------------
5493 #
5494 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5495 # Maintainer: The Dev Container spec maintainers
5496
5497
5498 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5499 USE_MOBY="${MOBY:-"true"}"
5500 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5501 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5502 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5503 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5504 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5505 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5506 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5507 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5508 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5509 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5510 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5511 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5512
5513 # Default: Exit on any failure.
5514 set -e
5515
5516 # Clean up
5517 rm -rf /var/lib/apt/lists/*
5518
5519 # Setup STDERR.
5520 err() {
5521 echo "(!) $*" >&2
5522 }
5523
5524 if [ "$(id -u)" -ne 0 ]; then
5525 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5526 exit 1
5527 fi
5528
5529 ###################
5530 # Helper Functions
5531 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5532 ###################
5533
5534 # Determine the appropriate non-root user
5535 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5536 USERNAME=""
5537 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5538 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5539 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5540 USERNAME=${CURRENT_USER}
5541 break
5542 fi
5543 done
5544 if [ "${USERNAME}" = "" ]; then
5545 USERNAME=root
5546 fi
5547 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5548 USERNAME=root
5549 fi
5550
5551 # Package manager update function
5552 pkg_mgr_update() {
5553 case ${ADJUSTED_ID} in
5554 debian)
5555 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5556 echo "Running apt-get update..."
5557 apt-get update -y
5558 fi
5559 ;;
5560 rhel)
5561 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5562 cache_check_dir="/var/cache/yum"
5563 else
5564 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5565 fi
5566 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5567 echo "Running ${PKG_MGR_CMD} makecache ..."
5568 ${PKG_MGR_CMD} makecache
5569 fi
5570 ;;
5571 esac
5572 }
5573
5574 # Checks if packages are installed and installs them if not
5575 check_packages() {
5576 case ${ADJUSTED_ID} in
5577 debian)
5578 if ! dpkg -s "$@" > /dev/null 2>&1; then
5579 pkg_mgr_update
5580 apt-get -y install --no-install-recommends "$@"
5581 fi
5582 ;;
5583 rhel)
5584 if ! rpm -q "$@" > /dev/null 2>&1; then
5585 pkg_mgr_update
5586 ${PKG_MGR_CMD} -y install "$@"
5587 fi
5588 ;;
5589 esac
5590 }
5591
5592 # Figure out correct version of a three part version number is not passed
5593 find_version_from_git_tags() {
5594 local variable_name=$1
5595 local requested_version=${!variable_name}
5596 if [ "${requested_version}" = "none" ]; then return; fi
5597 local repository=$2
5598 local prefix=${3:-"tags/v"}
5599 local separator=${4:-"."}
5600 local last_part_optional=${5:-"false"}
5601 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5602 local escaped_separator=${separator//./\\.}
5603 local last_part
5604 if [ "${last_part_optional}" = "true" ]; then
5605 last_part="(${escaped_separator}[0-9]+)?"
5606 else
5607 last_part="${escaped_separator}[0-9]+"
5608 fi
5609 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5610 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5611 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5612 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5613 else
5614 set +e
5615 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5616 set -e
5617 fi
5618 fi
5619 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5620 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5621 exit 1
5622 fi
5623 echo "${variable_name}=${!variable_name}"
5624 }
5625
5626 # Use semver logic to decrement a version number then look for the closest match
5627 find_prev_version_from_git_tags() {
5628 local variable_name=$1
5629 local current_version=${!variable_name}
5630 local repository=$2
5631 # Normally a "v" is used before the version number, but support alternate cases
5632 local prefix=${3:-"tags/v"}
5633 # Some repositories use "_" instead of "." for version number part separation, support that
5634 local separator=${4:-"."}
5635 # Some tools release versions that omit the last digit (e.g. go)
5636 local last_part_optional=${5:-"false"}
5637 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5638 local version_suffix_regex=$6
5639 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5640 set +e
5641 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5642 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5643 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5644
5645 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5646 ((major=major-1))
5647 declare -g ${variable_name}="${major}"
5648 # Look for latest version from previous major release
5649 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5650 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5651 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5652 ((minor=minor-1))
5653 declare -g ${variable_name}="${major}.${minor}"
5654 # Look for latest version from previous minor release
5655 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5656 else
5657 ((breakfix=breakfix-1))
5658 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5659 declare -g ${variable_name}="${major}.${minor}"
5660 else
5661 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5662 fi
5663 fi
5664 set -e
5665 }
5666
# Function to fetch the version released prior to the latest version
#
# Args:
#   $1 - project URL on github.com (used only for the git-tags fallback)
#   $2 - GitHub API "releases" URL for the same repository
#   $3 - NAME of the global variable to update with the previous version
#
# Overwrites the named variable (via declare -g) with the previous release's
# version string, stripped of any leading "v". When the API is rate limited,
# falls back to scanning git tags via find_prev_version_from_git_tags.
get_previous_version() {
    local url=$1
    local repo_url=$2
    local variable_name=$3
    # Indirect expansion: seed prev_version from the caller-named variable.
    prev_version=${!variable_name}

    output=$(curl -s "$repo_url");
    if echo "$output" | jq -e 'type == "object"' > /dev/null; then
        # A JSON object response is an API error payload, not a release list.
        message=$(echo "$output" | jq -r '.message')

        if [[ $message == "API rate limit exceeded"* ]]; then
            echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
            echo -e "\nAttempting to find latest version using GitHub tags."
            find_prev_version_from_git_tags prev_version "$url" "tags/v"
            declare -g ${variable_name}="${prev_version}"
        fi
    elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
        # A JSON array is the releases list; index 1 is the release just
        # before the most recent one.
        echo -e "\nAttempting to find latest version using GitHub Api."
        version=$(echo "$output" | jq -r '.[1].tag_name')
        # Strip a leading "v" from the tag name before storing it.
        declare -g ${variable_name}="${version#v}"
    fi
    echo "${variable_name}=${!variable_name}"
}
5691
# Translate a github.com project URL into its REST API "releases" endpoint,
# e.g. https://github.com/docker/compose ->
#      https://api.github.com/repos/docker/compose/releases
get_github_api_repo_url() {
    local project_url=$1
    local api_base="${project_url/https:\/\/github.com/https:\/\/api.github.com\/repos}"
    echo "${api_base}/releases"
}
5696
5697 ###########################################
5698 # Start docker-in-docker installation
5699 ###########################################
5700
5701 # Ensure apt is in non-interactive to avoid prompts
5702 export DEBIAN_FRONTEND=noninteractive
5703
5704 # Source /etc/os-release to get OS info
5705 . /etc/os-release
5706
5707 # Determine adjusted ID and package manager
5708 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5709 ADJUSTED_ID="debian"
5710 PKG_MGR_CMD="apt-get"
5711 # Use dpkg for Debian-based systems
5712 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5713 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5714 ADJUSTED_ID="rhel"
5715 # Determine the appropriate package manager for RHEL-based systems
5716 for pkg_mgr in tdnf dnf microdnf yum; do
5717 if command -v "$pkg_mgr" >/dev/null 2>&1; then
5718 PKG_MGR_CMD="$pkg_mgr"
5719 break
5720 fi
5721 done
5722
5723 if [ -z "${PKG_MGR_CMD}" ]; then
5724 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5725 exit 1
5726 fi
5727
5728 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5729 else
5730 err "Linux distro ${ID} not supported."
5731 exit 1
5732 fi
5733
5734 # Azure Linux specific setup
5735 if [ "${ID}" = "azurelinux" ]; then
5736 VERSION_CODENAME="azurelinux${VERSION_ID}"
5737 fi
5738
5739 # Prevent attempting to install Moby on Debian trixie (packages removed)
5740 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5741 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5742 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5743 exit 1
5744 fi
5745
5746 # Check if distro is supported
5747 if [ "${USE_MOBY}" = "true" ]; then
5748 if [ "${ADJUSTED_ID}" = "debian" ]; then
5749 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5750 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5751 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5752 exit 1
5753 fi
5754 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5755 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5756 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5757 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5758 else
5759 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5760 fi
5761 fi
5762 else
5763 if [ "${ADJUSTED_ID}" = "debian" ]; then
5764 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5765 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5766 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5767 exit 1
5768 fi
5769 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5770 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5771
5772 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5773 fi
5774 fi
5775
# Install base dependencies (check_packages is defined earlier in the script;
# presumably it installs only the packages not already present — TODO confirm)
base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
case ${ADJUSTED_ID} in
    debian)
        check_packages apt-transport-https $base_packages dirmngr
        ;;
    rhel)
        check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel

        ;;
esac

# Install git if not already present
if ! command -v git >/dev/null 2>&1; then
    check_packages git
fi

# Update CA certificates to ensure HTTPS connections work properly
# This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
# Only run for Debian-based systems (RHEL uses update-ca-trust instead)
if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
    update-ca-certificates
fi

# Swap to legacy iptables for compatibility (Debian only)
if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
    update-alternatives --set iptables /usr/sbin/iptables-legacy
    update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
5805
# Set up the necessary repositories for the chosen engine.
# USE_MOBY=true  -> Microsoft "moby-*" packages
# USE_MOBY=false -> upstream Docker CE packages
if [ "${USE_MOBY}" = "true" ]; then
    # Name of open source engine/cli
    engine_package_name="moby-engine"
    cli_package_name="moby-cli"

    case ${ADJUSTED_ID} in
        debian)
            # Import key safely and import Microsoft apt repo
            # (both keys are concatenated before being dearmored into a
            # single keyring file)
            {
                curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
            } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
            echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
            ;;
        rhel)
            echo "(*) ${ID} detected - checking for Moby packages..."

            # Check if moby packages are available in default repos
            if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                echo "(*) Using built-in ${ID} Moby packages"
            else
                case "${ID}" in
                    azurelinux)
                        # Azure Linux ships no Moby packages: fail with a
                        # pointer to the Docker CE path instead.
                        echo "(*) Moby packages not found in Azure Linux repositories"
                        echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                        err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                        exit 1
                        ;;
                    mariner)
                        echo "(*) Adding Microsoft repository for CBL-Mariner..."
                        # Add Microsoft repository if packages aren't available locally
                        curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                        cat > /etc/yum.repos.d/microsoft.repo << EOF
[microsoft]
name=Microsoft Repository
baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
EOF
                        # Verify packages are available after adding repo
                        pkg_mgr_update
                        if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                            echo "(*) Moby packages not found in Microsoft repository either"
                            err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                            exit 1
                        fi
                        ;;
                    *)
                        err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                        exit 1
                        ;;
                esac
            fi
            ;;
    esac
else
    # Name of licensed engine/cli
    engine_package_name="docker-ce"
    cli_package_name="docker-ce-cli"
    case ${ADJUSTED_ID} in
        debian)
            # Standard Docker CE apt repository + signing key
            curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
            ;;
        rhel)
            # Docker CE repository setup for RHEL-based systems
            # Manually writes the docker-ce-stable repo file; used when no
            # dnf/yum config-manager is available (e.g. tdnf on Azure Linux).
            # NOTE: the CentOS 9 repo is used regardless of host distro.
            setup_docker_ce_repo() {
                curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
            }
            # Best-effort install of extra libraries Docker CE needs on
            # Azure Linux / Mariner; failures are tolerated.
            install_azure_linux_deps() {
                echo "(*) Installing device-mapper libraries for Docker CE..."
                [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                echo "(*) Installing additional Docker CE dependencies..."
                ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                    echo "(*) Some optional dependencies could not be installed, continuing..."
                }
            }
            # When SELinux is active, register a minimal file context for
            # /var/lib/docker (container-selinux itself is unavailable here).
            setup_selinux_context() {
                if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                    echo "(*) Creating minimal SELinux context for Docker compatibility..."
                    mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                    echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                fi
            }

            # Special handling for RHEL Docker CE installation
            case "${ID}" in
                azurelinux|mariner)
                    echo "(*) ${ID} detected"
                    echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                    echo "(*) Setting up Docker CE repository..."

                    setup_docker_ce_repo
                    install_azure_linux_deps

                    if [ "${USE_MOBY}" != "true" ]; then
                        echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                        echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                        setup_selinux_context
                    else
                        echo "(*) Using Moby - container-selinux not required"
                    fi
                    ;;
                *)
                    # Standard RHEL/CentOS/Fedora approach
                    if command -v dnf >/dev/null 2>&1; then
                        dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    elif command -v yum-config-manager >/dev/null 2>&1; then
                        yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    else
                        # Manual fallback
                        setup_docker_ce_repo
                    fi
                    ;;
            esac
            ;;
    esac
fi

# Refresh package database so the newly added repositories are visible
case ${ADJUSTED_ID} in
    debian)
        apt-get update
        ;;
    rhel)
        pkg_mgr_update
        ;;
esac
5948
# Soft version matching: turn the requested DOCKER_VERSION into package-manager
# version suffixes (apt "=<ver>" / rpm "-<ver>"), matching partial versions.
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            # A bare "=" means the grep matched nothing -> unknown version
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
            if [ "${USE_MOBY}" = "true" ]; then
                available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            else
                available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            fi
            set -e
            if [ -n "${available_versions}" ]; then
                # Same suffix is used for both packages on RHEL-style systems
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                # Unlike the debian branch, an unknown version is not fatal here
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi

# Version matching for moby-buildx (only relevant when installing Moby)
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Same escaping / madison-matching strategy as for DOCKER_VERSION above
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
6031
# Install Docker / Moby CLI and Engine if not already installed.
# FIXES vs previous revision:
#  - RHEL branches paired docker-ce with ${cli_version_suffix} and
#    docker-ce-cli with ${engine_version_suffix} (swapped relative to the
#    debian branch and the variable names); now each package gets its own
#    suffix. (Currently both suffixes carry the same value on RHEL, so this
#    is a consistency/latent-bug fix.)
#  - docker_ce_version / docker_cli_version assignments were likewise swapped.
#  - A duplicated "Standard installation failed" echo was removed.
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Pin the engine/cli at the installed version, then add compose
                apt-mark hold docker-ce docker-ce-cli
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    set +e # Don't exit on error for this section
                    # Each package is paired with its matching version suffix
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Get the repository baseurl
                        # NOTE(review): hard-coded to x86_64/el9 - this manual
                        # fallback will not work on arm64 hosts. TODO confirm.
                        repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Get package names with versions (strip the leading "-")
                        if [ -n "${cli_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                            docker_cli_version="${cli_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
6162
echo "Finished installing docker / moby!"

# Location where Docker CLI plugins (compose, buildx) are installed below
docker_home="/usr/libexec/docker"
cli_plugins_dir="${docker_home}/cli-plugins"
6167
# fallback for docker-compose: when the requested release asset is missing,
# look up the previous release (via get_previous_version, which updates the
# global compose_version in place) and retry the download.
# $1 - docker/compose project URL on github.com
# Relies on globals: compose_version, target_compose_arch, docker_compose_path.
fallback_compose(){
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    get_previous_version "${url}" "${repo_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
}
6177
# If 'docker-compose' command is to be included.
# FIX vs previous revision: the download-failure handler echoed the same
# "(!) Failed to fetch..." message that fallback_compose itself prints,
# duplicating the log line; the bare "|| fallback_compose" now matches the
# buildx and compose-switch fallback style.
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the package architecture onto docker/compose release-asset naming
    case "${architecture}" in
        amd64|x86_64) target_compose_arch=x86_64 ;;
        arm64|aarch64) target_compose_arch=aarch64 ;;
        *)
            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        # compose-switch only makes sense alongside compose v2
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum and verify the binary against it
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        # v2: resolve the requested version against git tags, then download
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        # fallback_compose prints its own failure message and retries with
        # the previous release
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || fallback_compose "$docker_compose_url"

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum and verify the binary against it
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a Docker CLI plugin ("docker compose")
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6233
# fallback method for compose-switch: when the requested release asset is
# missing, look up the previous release (get_previous_version updates the
# global compose_switch_version in place) and retry the download.
# $1 - docker/compose-switch project URL on github.com
# Relies on global: target_switch_arch (must be set before calling).
fallback_compose-switch() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    get_previous_version "$url" "$repo_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
}
6243 # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
6244 if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
6245 if type docker-compose > /dev/null 2>&1; then
6246 echo "(*) Installing compose-switch..."
6247 current_compose_path="$(command -v docker-compose)"
6248 target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
6249 compose_switch_version="latest"
6250 compose_switch_url="https://github.com/docker/compose-switch"
6251 # Try to get latest version, fallback to known stable version if GitHub API fails
6252 set +e
6253 find_version_from_git_tags compose_switch_version "$compose_switch_url"
6254 if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
6255 echo "(*) GitHub API rate limited or failed, using fallback method"
6256 fallback_compose-switch "$compose_switch_url"
6257 fi
6258 set -e
6259
6260 # Map architecture for compose-switch downloads
6261 case "${architecture}" in
6262 amd64|x86_64) target_switch_arch=amd64 ;;
6263 arm64|aarch64) target_switch_arch=arm64 ;;
6264 *) target_switch_arch=${architecture} ;;
6265 esac
6266 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
6267 chmod +x /usr/local/bin/compose-switch
6268 # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
6269 # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
6270 mv "${current_compose_path}" "${target_compose_path}"
6271 update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
6272 update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
6273 else
6274 err "Skipping installation of compose-switch as docker compose is unavailable..."
6275 fi
6276 fi
6277
# If the init script already exists, this feature was installed before - exit.
if [ -f "/usr/local/share/docker-init.sh" ]; then
    echo "/usr/local/share/docker-init.sh already exists, so exiting."
    # Clean up
    rm -rf /var/lib/apt/lists/*
    exit 0
fi
echo "docker-init doesn't exist, adding..."

# Create the docker group if it does not exist yet.
# FIX: grep reads /etc/group directly (was a useless use of cat).
if ! grep -e "^docker:" /etc/group > /dev/null 2>&1; then
    groupadd -r docker
fi

# Allow the target user to talk to the docker daemon socket
# (USERNAME quoted to survive any unusual value).
usermod -aG docker "${USERNAME}"
6292
# fallback for docker/buildx: retry with the previous release when the
# latest asset download fails.
# $1 - docker/buildx project URL on github.com
# Updates globals buildx_version (via get_previous_version) and
# buildx_file_name; downloads into the current directory (caller cd's to /tmp).
fallback_buildx() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
    get_previous_version "$url" "$repo_url" buildx_version
    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
    echo -e "\nAttempting to install v${buildx_version}"
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
}
6303
# Install the buildx CLI plugin when requested
if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
    buildx_version="latest"
    docker_buildx_url="https://github.com/docker/buildx"
    find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
    echo "(*) Installing buildx ${buildx_version}..."

    # Map architecture for buildx downloads
    case "${architecture}" in
        amd64|x86_64) target_buildx_arch=amd64 ;;
        arm64|aarch64) target_buildx_arch=arm64 ;;
        *) target_buildx_arch=${architecture} ;;
    esac

    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

    cd /tmp
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

    # NOTE(review): re-assigns the same values set earlier in the script;
    # redundant but harmless.
    docker_home="/usr/libexec/docker"
    cli_plugins_dir="${docker_home}/cli-plugins"

    mkdir -p ${cli_plugins_dir}
    mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
    chmod +x ${cli_plugins_dir}/docker-buildx

    # Give the container user's docker group read/write access, and setgid
    # directories so new files inherit the docker group
    chown -R "${USERNAME}:docker" "${docker_home}"
    chmod -R g+r+w "${docker_home}"
    find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
fi
6333
# Decide whether to pass --ip6tables=false to dockerd when the feature asks
# to disable ip6tables.
DOCKER_DEFAULT_IP6_TABLES=""
if [ "$DISABLE_IP6_TABLES" == true ]; then
    requested_version=""
    # checking whether the version requested either is in semver format or just a number denoting the major version
    # and, extracting the major version number out of the two scenarios
    semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
    if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
        requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
    elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
        requested_version=$DOCKER_VERSION
    fi
    # Only applied for "latest" or major version >= 27 — presumably because
    # older dockerd does not accept --ip6tables=false; verify against the
    # engine's release notes.
    if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
        DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
        echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
    fi
fi
6350
# Ensure the parent directory for the init script exists
if [ ! -d /usr/local/share ]; then
    mkdir -p /usr/local/share
fi

# Write the first section of docker-init.sh. The heredoc delimiter is
# UNQUOTED, so the ${...} references below are expanded NOW, baking the
# feature's option values into the generated script.
tee /usr/local/share/docker-init.sh > /dev/null \
<< EOF
#!/bin/sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------

set -e

AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
EOF
6369
6370 tee -a /usr/local/share/docker-init.sh > /dev/null \
6371 << 'EOF'
6372 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6373 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6374 find /run /var/run -iname 'docker*.pid' -delete || :
6375 find /run /var/run -iname 'container*.pid' -delete || :
6376
6377 # -- Start: dind wrapper script --
6378 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6379
6380 export container=docker
6381
6382 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6383 mount -t securityfs none /sys/kernel/security || {
6384 echo >&2 'Could not mount /sys/kernel/security.'
6385 echo >&2 'AppArmor detection and --privileged mode might break.'
6386 }
6387 fi
6388
6389 # Mount /tmp (conditionally)
6390 if ! mountpoint -q /tmp; then
6391 mount -t tmpfs none /tmp
6392 fi
6393
6394 set_cgroup_nesting()
6395 {
6396 # cgroup v2: enable nesting
6397 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6398 # move the processes from the root group to the /init group,
6399 # otherwise writing subtree_control fails with EBUSY.
6400 # An error during moving non-existent process (i.e., "cat") is ignored.
6401 mkdir -p /sys/fs/cgroup/init
6402 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6403 # enable controllers
6404 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6405 > /sys/fs/cgroup/cgroup.subtree_control
6406 fi
6407 }
6408
6409 # Set cgroup nesting, retrying if necessary
6410 retry_cgroup_nesting=0
6411
6412 until [ "${retry_cgroup_nesting}" -eq "5" ];
6413 do
6414 set +e
6415 set_cgroup_nesting
6416
6417 if [ $? -ne 0 ]; then
6418 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6419 else
6420 break
6421 fi
6422
6423 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6424 set -e
6425 done
6426
6427 # -- End: dind wrapper script --
6428
6429 # Handle DNS
6430 set +e
6431 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6432 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6433 then
6434 echo "Setting dockerd Azure DNS."
6435 CUSTOMDNS="--dns 168.63.129.16"
6436 else
6437 echo "Not setting dockerd DNS manually."
6438 CUSTOMDNS=""
6439 fi
6440 set -e
6441
6442 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6443 then
6444 DEFAULT_ADDRESS_POOL=""
6445 else
6446 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6447 fi
6448
6449 # Start docker/moby engine
6450 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6451 INNEREOF
6452 )"
6453
6454 sudo_if() {
6455 COMMAND="$*"
6456
6457 if [ "$(id -u)" -ne 0 ]; then
6458 sudo $COMMAND
6459 else
6460 $COMMAND
6461 fi
6462 }
6463
6464 retry_docker_start_count=0
6465 docker_ok="false"
6466
6467 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6468 do
6469 # Start using sudo if not invoked as root
6470 if [ "$(id -u)" -ne 0 ]; then
6471 sudo /bin/sh -c "${dockerd_start}"
6472 else
6473 eval "${dockerd_start}"
6474 fi
6475
6476 retry_count=0
6477 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6478 do
6479 sleep 1s
6480 set +e
6481 docker info > /dev/null 2>&1 && docker_ok="true"
6482 set -e
6483
6484 retry_count=`expr $retry_count + 1`
6485 done
6486
6487 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6488 echo "(*) Failed to start docker, retrying..."
6489 set +e
6490 sudo_if pkill dockerd
6491 sudo_if pkill containerd
6492 set -e
6493 fi
6494
6495 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6496 done
6497
6498 # Execute whatever commands were passed in (if any). This allows us
6499 # to set this script to ENTRYPOINT while still executing the default CMD.
6500 exec "$@"
6501 EOF
6502
6503 chmod +x /usr/local/share/docker-init.sh
6504 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6505
6506 # Clean up
6507 rm -rf /var/lib/apt/lists/*
6508
6509 echo 'docker-in-docker-debian script has completed!'"#),
6510 ]).await;
6511
6512 return Ok(http::Response::builder()
6513 .status(200)
6514 .body(AsyncBody::from(response))
6515 .unwrap());
6516 }
6517 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6518 let response = r#"
6519 {
6520 "schemaVersion": 2,
6521 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6522 "config": {
6523 "mediaType": "application/vnd.devcontainers",
6524 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6525 "size": 2
6526 },
6527 "layers": [
6528 {
6529 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6530 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6531 "size": 20992,
6532 "annotations": {
6533 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6534 }
6535 }
6536 ],
6537 "annotations": {
6538 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6539 "com.github.package.type": "devcontainer_feature"
6540 }
6541 }
6542 "#;
6543
6544 return Ok(http::Response::builder()
6545 .status(200)
6546 .body(http_client::AsyncBody::from(response))
6547 .unwrap());
6548 }
6549 if parts.uri.path()
6550 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6551 {
6552 let response = build_tarball(vec![
6553 ("./devcontainer-feature.json", r#"
6554 {
6555 "id": "go",
6556 "version": "1.3.3",
6557 "name": "Go",
6558 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6559 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6560 "options": {
6561 "version": {
6562 "type": "string",
6563 "proposals": [
6564 "latest",
6565 "none",
6566 "1.24",
6567 "1.23"
6568 ],
6569 "default": "latest",
6570 "description": "Select or enter a Go version to install"
6571 },
6572 "golangciLintVersion": {
6573 "type": "string",
6574 "default": "latest",
6575 "description": "Version of golangci-lint to install"
6576 }
6577 },
6578 "init": true,
6579 "customizations": {
6580 "vscode": {
6581 "extensions": [
6582 "golang.Go"
6583 ],
6584 "settings": {
6585 "github.copilot.chat.codeGeneration.instructions": [
6586 {
6587 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6588 }
6589 ]
6590 }
6591 }
6592 },
6593 "containerEnv": {
6594 "GOROOT": "/usr/local/go",
6595 "GOPATH": "/go",
6596 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6597 },
6598 "capAdd": [
6599 "SYS_PTRACE"
6600 ],
6601 "securityOpt": [
6602 "seccomp=unconfined"
6603 ],
6604 "installsAfter": [
6605 "ghcr.io/devcontainers/features/common-utils"
6606 ]
6607 }
6608 "#),
6609 ("./install.sh", r#"
6610 #!/usr/bin/env bash
6611 #-------------------------------------------------------------------------------------------------------------
6612 # Copyright (c) Microsoft Corporation. All rights reserved.
6613 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6614 #-------------------------------------------------------------------------------------------------------------
6615 #
6616 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6617 # Maintainer: The VS Code and Codespaces Teams
6618
6619 TARGET_GO_VERSION="${VERSION:-"latest"}"
6620 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6621
6622 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6623 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6624 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6625 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6626
6627 # https://www.google.com/linuxrepositories/
6628 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6629
6630 set -e
6631
6632 if [ "$(id -u)" -ne 0 ]; then
6633 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6634 exit 1
6635 fi
6636
6637 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6638 . /etc/os-release
6639 # Get an adjusted ID independent of distro variants
6640 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6641 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6642 ADJUSTED_ID="debian"
6643 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6644 ADJUSTED_ID="rhel"
6645 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6646 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6647 else
6648 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6649 fi
6650 else
6651 echo "Linux distro ${ID} not supported."
6652 exit 1
6653 fi
6654
6655 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6656 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6657 # Update the repo files to reference vault.centos.org.
6658 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6659 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6660 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6661 fi
6662
6663 # Setup INSTALL_CMD & PKG_MGR_CMD
6664 if type apt-get > /dev/null 2>&1; then
6665 PKG_MGR_CMD=apt-get
6666 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6667 elif type microdnf > /dev/null 2>&1; then
6668 PKG_MGR_CMD=microdnf
6669 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6670 elif type dnf > /dev/null 2>&1; then
6671 PKG_MGR_CMD=dnf
6672 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6673 else
6674 PKG_MGR_CMD=yum
6675 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6676 fi
6677
6678 # Clean up
6679 clean_up() {
6680 case ${ADJUSTED_ID} in
6681 debian)
6682 rm -rf /var/lib/apt/lists/*
6683 ;;
6684 rhel)
6685 rm -rf /var/cache/dnf/* /var/cache/yum/*
6686 rm -rf /tmp/yum.log
6687 rm -rf ${GPG_INSTALL_PATH}
6688 ;;
6689 esac
6690 }
6691 clean_up
6692
6693
6694 # Figure out correct version of a three part version number is not passed
6695 find_version_from_git_tags() {
6696 local variable_name=$1
6697 local requested_version=${!variable_name}
6698 if [ "${requested_version}" = "none" ]; then return; fi
6699 local repository=$2
6700 local prefix=${3:-"tags/v"}
6701 local separator=${4:-"."}
6702 local last_part_optional=${5:-"false"}
6703 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6704 local escaped_separator=${separator//./\\.}
6705 local last_part
6706 if [ "${last_part_optional}" = "true" ]; then
6707 last_part="(${escaped_separator}[0-9]+)?"
6708 else
6709 last_part="${escaped_separator}[0-9]+"
6710 fi
6711 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6712 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6713 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6714 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6715 else
6716 set +e
6717 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6718 set -e
6719 fi
6720 fi
6721 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6722 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6723 exit 1
6724 fi
6725 echo "${variable_name}=${!variable_name}"
6726 }
6727
6728 pkg_mgr_update() {
6729 case $ADJUSTED_ID in
6730 debian)
6731 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6732 echo "Running apt-get update..."
6733 ${PKG_MGR_CMD} update -y
6734 fi
6735 ;;
6736 rhel)
6737 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6738 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6739 echo "Running ${PKG_MGR_CMD} makecache ..."
6740 ${PKG_MGR_CMD} makecache
6741 fi
6742 else
6743 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6744 echo "Running ${PKG_MGR_CMD} check-update ..."
6745 set +e
6746 ${PKG_MGR_CMD} check-update
6747 rc=$?
6748 if [ $rc != 0 ] && [ $rc != 100 ]; then
6749 exit 1
6750 fi
6751 set -e
6752 fi
6753 fi
6754 ;;
6755 esac
6756 }
6757
6758 # Checks if packages are installed and installs them if not
6759 check_packages() {
6760 case ${ADJUSTED_ID} in
6761 debian)
6762 if ! dpkg -s "$@" > /dev/null 2>&1; then
6763 pkg_mgr_update
6764 ${INSTALL_CMD} "$@"
6765 fi
6766 ;;
6767 rhel)
6768 if ! rpm -q "$@" > /dev/null 2>&1; then
6769 pkg_mgr_update
6770 ${INSTALL_CMD} "$@"
6771 fi
6772 ;;
6773 esac
6774 }
6775
6776 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6777 rm -f /etc/profile.d/00-restore-env.sh
6778 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6779 chmod +x /etc/profile.d/00-restore-env.sh
6780
6781 # Some distributions do not install awk by default (e.g. Mariner)
6782 if ! type awk >/dev/null 2>&1; then
6783 check_packages awk
6784 fi
6785
6786 # Determine the appropriate non-root user
6787 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6788 USERNAME=""
6789 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6790 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6791 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6792 USERNAME=${CURRENT_USER}
6793 break
6794 fi
6795 done
6796 if [ "${USERNAME}" = "" ]; then
6797 USERNAME=root
6798 fi
6799 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6800 USERNAME=root
6801 fi
6802
6803 export DEBIAN_FRONTEND=noninteractive
6804
6805 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6806
6807 if [ $ADJUSTED_ID = "debian" ]; then
6808 check_packages g++ libc6-dev
6809 else
6810 check_packages gcc-c++ glibc-devel
6811 fi
6812 # Install curl, git, other dependencies if missing
6813 if ! type curl > /dev/null 2>&1; then
6814 check_packages curl
6815 fi
6816 if ! type git > /dev/null 2>&1; then
6817 check_packages git
6818 fi
6819 # Some systems, e.g. Mariner, still a few more packages
6820 if ! type as > /dev/null 2>&1; then
6821 check_packages binutils
6822 fi
6823 if ! [ -f /usr/include/linux/errno.h ]; then
6824 check_packages kernel-headers
6825 fi
6826 # Minimal RHEL install may need findutils installed
6827 if ! [ -f /usr/bin/find ]; then
6828 check_packages findutils
6829 fi
6830
6831 # Get closest match for version number specified
6832 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6833
6834 architecture="$(uname -m)"
6835 case $architecture in
6836 x86_64) architecture="amd64";;
6837 aarch64 | armv8*) architecture="arm64";;
6838 aarch32 | armv7* | armvhf*) architecture="armv6l";;
6839 i?86) architecture="386";;
6840 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6841 esac
6842
6843 # Install Go
6844 umask 0002
6845 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6846 groupadd -r golang
6847 fi
6848 usermod -a -G golang "${USERNAME}"
6849 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6850
6851 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6852 # Use a temporary location for gpg keys to avoid polluting image
6853 export GNUPGHOME="/tmp/tmp-gnupg"
6854 mkdir -p ${GNUPGHOME}
6855 chmod 700 ${GNUPGHOME}
6856 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6857 gpg -q --import /tmp/tmp-gnupg/golang_key
6858 echo "Downloading Go ${TARGET_GO_VERSION}..."
6859 set +e
6860 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6861 exit_code=$?
6862 set -e
6863 if [ "$exit_code" != "0" ]; then
6864 echo "(!) Download failed."
6865 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6866 set +e
6867 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6868 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6869 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6870 # Handle Go's odd version pattern where "0" releases omit the last part
6871 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6872 ((minor=minor-1))
6873 TARGET_GO_VERSION="${major}.${minor}"
6874 # Look for latest version from previous minor release
6875 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6876 else
6877 ((breakfix=breakfix-1))
6878 if [ "${breakfix}" = "0" ]; then
6879 TARGET_GO_VERSION="${major}.${minor}"
6880 else
6881 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6882 fi
6883 fi
6884 set -e
6885 echo "Trying ${TARGET_GO_VERSION}..."
6886 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6887 fi
6888 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6889 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6890 echo "Extracting Go ${TARGET_GO_VERSION}..."
6891 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6892 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6893 else
6894 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6895 fi
6896
6897 # Install Go tools that are isImportant && !replacedByGopls based on
6898 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6899 GO_TOOLS="\
6900 golang.org/x/tools/gopls@latest \
6901 honnef.co/go/tools/cmd/staticcheck@latest \
6902 golang.org/x/lint/golint@latest \
6903 github.com/mgechev/revive@latest \
6904 github.com/go-delve/delve/cmd/dlv@latest \
6905 github.com/fatih/gomodifytags@latest \
6906 github.com/haya14busa/goplay/cmd/goplay@latest \
6907 github.com/cweill/gotests/gotests@latest \
6908 github.com/josharian/impl@latest"
6909
6910 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6911 echo "Installing common Go tools..."
6912 export PATH=${TARGET_GOROOT}/bin:${PATH}
6913 export GOPATH=/tmp/gotools
6914 export GOCACHE="${GOPATH}/cache"
6915
6916 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6917 cd "${GOPATH}"
6918
6919 # Use go get for versions of go under 1.16
6920 go_install_command=install
6921 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6922 export GO111MODULE=on
6923 go_install_command=get
6924 echo "Go version < 1.16, using go get."
6925 fi
6926
6927 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6928
6929 # Move Go tools into path
6930 if [ -d "${GOPATH}/bin" ]; then
6931 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6932 fi
6933
6934 # Install golangci-lint from precompiled binaries
6935 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6936 echo "Installing golangci-lint latest..."
6937 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6938 sh -s -- -b "${TARGET_GOPATH}/bin"
6939 else
6940 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6941 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6942 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6943 fi
6944
6945 # Remove Go tools temp directory
6946 rm -rf "${GOPATH}"
6947 fi
6948
6949
6950 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6951 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6952 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6953 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6954
6955 # Clean up
6956 clean_up
6957
6958 echo "Done!"
6959 "#),
6960 ])
6961 .await;
6962 return Ok(http::Response::builder()
6963 .status(200)
6964 .body(AsyncBody::from(response))
6965 .unwrap());
6966 }
6967 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6968 let response = r#"
6969 {
6970 "schemaVersion": 2,
6971 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6972 "config": {
6973 "mediaType": "application/vnd.devcontainers",
6974 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6975 "size": 2
6976 },
6977 "layers": [
6978 {
6979 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6980 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6981 "size": 19968,
6982 "annotations": {
6983 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6984 }
6985 }
6986 ],
6987 "annotations": {
6988 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6989 "com.github.package.type": "devcontainer_feature"
6990 }
6991 }"#;
6992 return Ok(http::Response::builder()
6993 .status(200)
6994 .body(AsyncBody::from(response))
6995 .unwrap());
6996 }
6997 if parts.uri.path()
6998 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6999 {
7000 let response = build_tarball(vec![
7001 (
7002 "./devcontainer-feature.json",
7003 r#"
7004{
7005 "id": "aws-cli",
7006 "version": "1.1.3",
7007 "name": "AWS CLI",
7008 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
7009 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
7010 "options": {
7011 "version": {
7012 "type": "string",
7013 "proposals": [
7014 "latest"
7015 ],
7016 "default": "latest",
7017 "description": "Select or enter an AWS CLI version."
7018 },
7019 "verbose": {
7020 "type": "boolean",
7021 "default": true,
7022 "description": "Suppress verbose output."
7023 }
7024 },
7025 "customizations": {
7026 "vscode": {
7027 "extensions": [
7028 "AmazonWebServices.aws-toolkit-vscode"
7029 ],
7030 "settings": {
7031 "github.copilot.chat.codeGeneration.instructions": [
7032 {
7033 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
7034 }
7035 ]
7036 }
7037 }
7038 },
7039 "installsAfter": [
7040 "ghcr.io/devcontainers/features/common-utils"
7041 ]
7042}
7043 "#,
7044 ),
7045 (
7046 "./install.sh",
7047 r#"#!/usr/bin/env bash
7048 #-------------------------------------------------------------------------------------------------------------
7049 # Copyright (c) Microsoft Corporation. All rights reserved.
7050 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7051 #-------------------------------------------------------------------------------------------------------------
7052 #
7053 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
7054 # Maintainer: The VS Code and Codespaces Teams
7055
7056 set -e
7057
7058 # Clean up
7059 rm -rf /var/lib/apt/lists/*
7060
7061 VERSION=${VERSION:-"latest"}
7062 VERBOSE=${VERBOSE:-"true"}
7063
7064 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7065 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7066
7067 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7068 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7069 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7070 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7071 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7072 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7073 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7074 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7075 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7076 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7077 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7078 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7079 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7080 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7081 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7082 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7083 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7084 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7085 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7086 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7087 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7088 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7089 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7090 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7091 YLZATHZKTJyiqA==
7092 =vYOk
7093 -----END PGP PUBLIC KEY BLOCK-----"
7094
7095 if [ "$(id -u)" -ne 0 ]; then
7096 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7097 exit 1
7098 fi
7099
7100 apt_get_update()
7101 {
7102 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7103 echo "Running apt-get update..."
7104 apt-get update -y
7105 fi
7106 }
7107
7108 # Checks if packages are installed and installs them if not
7109 check_packages() {
7110 if ! dpkg -s "$@" > /dev/null 2>&1; then
7111 apt_get_update
7112 apt-get -y install --no-install-recommends "$@"
7113 fi
7114 }
7115
7116 export DEBIAN_FRONTEND=noninteractive
7117
7118 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7119
7120 verify_aws_cli_gpg_signature() {
7121 local filePath=$1
7122 local sigFilePath=$2
7123 local awsGpgKeyring=aws-cli-public-key.gpg
7124
7125 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7126 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7127 local status=$?
7128
7129 rm "./${awsGpgKeyring}"
7130
7131 return ${status}
7132 }
7133
7134 install() {
7135 local scriptZipFile=awscli.zip
7136 local scriptSigFile=awscli.sig
7137
7138 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7139 if [ "${VERSION}" != "latest" ]; then
7140 local versionStr=-${VERSION}
7141 fi
7142 architecture=$(dpkg --print-architecture)
7143 case "${architecture}" in
7144 amd64) architectureStr=x86_64 ;;
7145 arm64) architectureStr=aarch64 ;;
7146 *)
7147 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7148 exit 1
7149 esac
7150 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7151 curl "${scriptUrl}" -o "${scriptZipFile}"
7152 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7153
7154 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7155 if (( $? > 0 )); then
7156 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7157 exit 1
7158 fi
7159
7160 if [ "${VERBOSE}" = "false" ]; then
7161 unzip -q "${scriptZipFile}"
7162 else
7163 unzip "${scriptZipFile}"
7164 fi
7165
7166 ./aws/install
7167
7168 # kubectl bash completion
7169 mkdir -p /etc/bash_completion.d
7170 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7171
7172 # kubectl zsh completion
7173 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7174 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7175 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7176 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7177 fi
7178
7179 rm -rf ./aws
7180 }
7181
7182 echo "(*) Installing AWS CLI..."
7183
7184 install
7185
7186 # Clean up
7187 rm -rf /var/lib/apt/lists/*
7188
7189 echo "Done!""#,
7190 ),
7191 ("./scripts/", r#""#),
7192 (
7193 "./scripts/fetch-latest-completer-scripts.sh",
7194 r#"
7195 #!/bin/bash
7196 #-------------------------------------------------------------------------------------------------------------
7197 # Copyright (c) Microsoft Corporation. All rights reserved.
7198 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7199 #-------------------------------------------------------------------------------------------------------------
7200 #
7201 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7202 # Maintainer: The Dev Container spec maintainers
7203 #
7204 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7205 #
7206 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7207 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7208 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7209
7210 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7211 chmod +x "$BASH_COMPLETER_SCRIPT"
7212
7213 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7214 chmod +x "$ZSH_COMPLETER_SCRIPT"
7215 "#,
7216 ),
7217 ("./scripts/vendor/", r#""#),
7218 (
7219 "./scripts/vendor/aws_bash_completer",
7220 r#"
7221 # Typically that would be added under one of the following paths:
7222 # - /etc/bash_completion.d
7223 # - /usr/local/etc/bash_completion.d
7224 # - /usr/share/bash-completion/completions
7225
7226 complete -C aws_completer aws
7227 "#,
7228 ),
7229 (
7230 "./scripts/vendor/aws_zsh_completer.sh",
7231 r#"
7232 # Source this file to activate auto completion for zsh using the bash
7233 # compatibility helper. Make sure to run `compinit` before, which should be
7234 # given usually.
7235 #
7236 # % source /path/to/zsh_complete.sh
7237 #
7238 # Typically that would be called somewhere in your .zshrc.
7239 #
7240 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7241 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7242 #
7243 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7244 #
7245 # zsh releases prior to that version do not export the required env variables!
7246
7247 autoload -Uz bashcompinit
7248 bashcompinit -i
7249
7250 _bash_complete() {
7251 local ret=1
7252 local -a suf matches
7253 local -x COMP_POINT COMP_CWORD
7254 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7255 local -x COMP_LINE="$words"
7256 local -A savejobstates savejobtexts
7257
7258 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7259 (( COMP_CWORD = CURRENT - 1))
7260 COMP_WORDS=( $words )
7261 BASH_VERSINFO=( 2 05b 0 1 release )
7262
7263 savejobstates=( ${(kv)jobstates} )
7264 savejobtexts=( ${(kv)jobtexts} )
7265
7266 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7267
7268 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7269
7270 if [[ -n $matches ]]; then
7271 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7272 compset -P '*/' && matches=( ${matches##*/} )
7273 compset -S '/*' && matches=( ${matches%%/*} )
7274 compadd -Q -f "${suf[@]}" -a matches && ret=0
7275 else
7276 compadd -Q "${suf[@]}" -a matches && ret=0
7277 fi
7278 fi
7279
7280 if (( ret )); then
7281 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7282 _default "${suf[@]}" && ret=0
7283 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7284 _directories "${suf[@]}" && ret=0
7285 fi
7286 fi
7287
7288 return ret
7289 }
7290
7291 complete -C aws_completer aws
7292 "#,
7293 ),
7294 ]).await;
7295
7296 return Ok(http::Response::builder()
7297 .status(200)
7298 .body(AsyncBody::from(response))
7299 .unwrap());
7300 }
7301
7302 Ok(http::Response::builder()
7303 .status(404)
7304 .body(http_client::AsyncBody::default())
7305 .unwrap())
7306 })
7307 }
7308}