1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use regex::Regex;
10
11use fs::Fs;
12use http_client::HttpClient;
13use util::{ResultExt, command::Command, normalize_path};
14
15use crate::{
16 DevContainerConfig, DevContainerContext,
17 command_json::{CommandRunner, DefaultCommandRunner},
18 devcontainer_api::{DevContainerError, DevContainerUp},
19 devcontainer_json::{
20 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
21 deserialize_devcontainer_json,
22 },
23 docker::{
24 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
25 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
26 get_remote_dir_from_config,
27 },
28 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
29 get_oci_token,
30 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
31 safe_id_lower,
32};
33
/// Tracks how far the devcontainer config has progressed through parsing.
/// Variable expansion must run before resources can be downloaded or built
/// (several methods refuse to proceed in the `Deserialized` state).
enum ConfigStatus {
    /// Parsed straight from JSON; `${...}` variables are still unexpanded.
    Deserialized(DevContainer),
    /// Non-remote variables (workspace paths, local env, id) have been expanded.
    VariableParsed(DevContainer),
}
38
/// The set of docker-compose files in play plus their resolved configuration.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    /// Full paths of every compose file passed to docker compose, including
    /// generated override files appended during the build.
    files: Vec<PathBuf>,
    /// The merged configuration as reported by the docker client.
    config: DockerComposeConfig,
}
44
/// Orchestrates bringing up a dev container from a `devcontainer.json`:
/// variable expansion, feature download, extended-Dockerfile generation,
/// image build, and container startup.
struct DevContainerManifest {
    /// HTTP client used to fetch OCI feature artifacts.
    http_client: Arc<dyn HttpClient>,
    /// Filesystem abstraction used for all reads/writes (configs, build files).
    fs: Arc<dyn Fs>,
    /// Docker operations (inspect, compose config/build, ...).
    docker_client: Arc<dyn DockerClient>,
    /// Command execution abstraction (not exercised in the code shown here).
    command_runner: Arc<dyn CommandRunner>,
    /// Unmodified devcontainer config text; variable expansion re-parses this.
    raw_config: String,
    /// Parse-stage state machine; see `ConfigStatus`.
    config: ConfigStatus,
    /// Host environment, used for `${localEnv:*}` substitution.
    local_environment: HashMap<String, String>,
    /// Root of the project on the host.
    local_project_directory: PathBuf,
    /// Directory containing the devcontainer config file.
    config_directory: PathBuf,
    /// File name of the devcontainer config within `config_directory`.
    file_name: String,
    /// `docker inspect` result for the resolved base image, once known.
    root_image: Option<DockerInspect>,
    /// Paths/tag for the generated features build, once prepared.
    features_build_info: Option<FeaturesBuildInfo>,
    /// Feature manifests downloaded for this container, in install order.
    features: Vec<FeatureManifest>,
}
/// Default base path for project workspaces inside the container.
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
61impl DevContainerManifest {
62 async fn new(
63 context: &DevContainerContext,
64 environment: HashMap<String, String>,
65 docker_client: Arc<dyn DockerClient>,
66 command_runner: Arc<dyn CommandRunner>,
67 local_config: DevContainerConfig,
68 local_project_path: &Path,
69 ) -> Result<Self, DevContainerError> {
70 let config_path = local_project_path.join(local_config.config_path.clone());
71 log::debug!("parsing devcontainer json found in {:?}", &config_path);
72 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
73 log::error!("Unable to read devcontainer contents: {e}");
74 DevContainerError::DevContainerParseFailed
75 })?;
76
77 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
78
79 let devcontainer_directory = config_path.parent().ok_or_else(|| {
80 log::error!("Dev container file should be in a directory");
81 DevContainerError::NotInValidProject
82 })?;
83 let file_name = config_path
84 .file_name()
85 .and_then(|f| f.to_str())
86 .ok_or_else(|| {
87 log::error!("Dev container file has no file name, or is invalid unicode");
88 DevContainerError::DevContainerParseFailed
89 })?;
90
91 Ok(Self {
92 fs: context.fs.clone(),
93 http_client: context.http_client.clone(),
94 docker_client,
95 command_runner,
96 raw_config: devcontainer_contents,
97 config: ConfigStatus::Deserialized(devcontainer),
98 local_project_directory: local_project_path.to_path_buf(),
99 local_environment: environment,
100 config_directory: devcontainer_directory.to_path_buf(),
101 file_name: file_name.to_string(),
102 root_image: None,
103 features_build_info: None,
104 features: Vec::new(),
105 })
106 }
107
108 fn devcontainer_id(&self) -> String {
109 let mut labels = self.identifying_labels();
110 labels.sort_by_key(|(key, _)| *key);
111
112 let mut hasher = DefaultHasher::new();
113 for (key, value) in &labels {
114 key.hash(&mut hasher);
115 value.hash(&mut hasher);
116 }
117
118 format!("{:016x}", hasher.finish())
119 }
120
121 fn identifying_labels(&self) -> Vec<(&str, String)> {
122 let labels = vec![
123 (
124 "devcontainer.local_folder",
125 (self.local_project_directory.display()).to_string(),
126 ),
127 (
128 "devcontainer.config_file",
129 (self.config_file().display()).to_string(),
130 ),
131 ];
132 labels
133 }
134
135 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
136 let mut replaced_content = content
137 .replace("${devcontainerId}", &self.devcontainer_id())
138 .replace(
139 "${containerWorkspaceFolderBasename}",
140 &self.remote_workspace_base_name().unwrap_or_default(),
141 )
142 .replace(
143 "${localWorkspaceFolderBasename}",
144 &self.local_workspace_base_name()?,
145 )
146 .replace(
147 "${containerWorkspaceFolder}",
148 &self
149 .remote_workspace_folder()
150 .map(|path| path.display().to_string())
151 .unwrap_or_default()
152 .replace('\\', "/"),
153 )
154 .replace(
155 "${localWorkspaceFolder}",
156 &self.local_workspace_folder().replace('\\', "/"),
157 );
158 for (k, v) in &self.local_environment {
159 let find = format!("${{localEnv:{k}}}");
160 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
161 }
162
163 Ok(replaced_content)
164 }
165
166 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
167 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
168 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
169
170 self.config = ConfigStatus::VariableParsed(parsed_config);
171
172 Ok(())
173 }
174
    /// Compute the environment for processes in the running container: the
    /// container's own env merged with the config's `remote_env`, where
    /// `${containerEnv:NAME}` references are expanded from `container_env`.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            // `${containerEnv:*}` expansion is performed textually on the
            // serialized JSON, which is then parsed back into a map.
            // NOTE(review): container env values containing JSON
            // metacharacters (quotes, backslashes) could make the substituted
            // text fail to re-parse — confirm whether such values can occur.
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // remote_env entries override matching container_env entries.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
207
208 fn config_file(&self) -> PathBuf {
209 self.config_directory.join(&self.file_name)
210 }
211
212 fn dev_container(&self) -> &DevContainer {
213 match &self.config {
214 ConfigStatus::Deserialized(dev_container) => dev_container,
215 ConfigStatus::VariableParsed(dev_container) => dev_container,
216 }
217 }
218
219 async fn dockerfile_location(&self) -> Option<PathBuf> {
220 let dev_container = self.dev_container();
221 match dev_container.build_type() {
222 DevContainerBuildType::Image(_) => None,
223 DevContainerBuildType::Dockerfile(build) => {
224 Some(self.config_directory.join(&build.dockerfile))
225 }
226 DevContainerBuildType::DockerCompose => {
227 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
228 return None;
229 };
230 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
231 else {
232 return None;
233 };
234 main_service.build.and_then(|b| {
235 let compose_file = docker_compose_manifest.files.first()?;
236 resolve_compose_dockerfile(
237 compose_file,
238 b.context.as_deref(),
239 b.dockerfile.as_deref()?,
240 )
241 })
242 }
243 DevContainerBuildType::None => None,
244 }
245 }
246
247 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
248 let mut hasher = DefaultHasher::new();
249 let prefix = match &self.dev_container().name {
250 Some(name) => &safe_id_lower(name),
251 None => "zed-dc",
252 };
253 let prefix = prefix.get(..6).unwrap_or(prefix);
254
255 dockerfile_build_path.hash(&mut hasher);
256
257 let hash = hasher.finish();
258 format!("{}-{:x}-features", prefix, hash)
259 }
260
261 /// Gets the base image from the devcontainer with the following precedence:
262 /// - The devcontainer image if an image is specified
263 /// - The image sourced in the Dockerfile if a Dockerfile is specified
264 /// - The image sourced in the docker-compose main service, if one is specified
265 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
266 /// If no such image is available, return an error
267 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
268 match self.dev_container().build_type() {
269 DevContainerBuildType::Image(image) => {
270 return Ok(image);
271 }
272 DevContainerBuildType::Dockerfile(build) => {
273 let dockerfile_contents = self.expanded_dockerfile_content().await?;
274 return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
275 || {
276 log::error!("Unable to find base image in Dockerfile");
277 DevContainerError::DevContainerParseFailed
278 },
279 );
280 }
281 DevContainerBuildType::DockerCompose => {
282 let docker_compose_manifest = self.docker_compose_manifest().await?;
283 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
284
285 if let Some(_) = main_service
286 .build
287 .as_ref()
288 .and_then(|b| b.dockerfile.as_ref())
289 {
290 let dockerfile_contents = self.expanded_dockerfile_content().await?;
291 return image_from_dockerfile(
292 dockerfile_contents,
293 &main_service.build.as_ref().and_then(|b| b.target.clone()),
294 )
295 .ok_or_else(|| {
296 log::error!("Unable to find base image in Dockerfile");
297 DevContainerError::DevContainerParseFailed
298 });
299 }
300 if let Some(image) = &main_service.image {
301 return Ok(image.to_string());
302 }
303
304 log::error!("No valid base image found in docker-compose configuration");
305 return Err(DevContainerError::DevContainerParseFailed);
306 }
307 DevContainerBuildType::None => {
308 log::error!("Not a valid devcontainer config for build");
309 return Err(DevContainerError::NotInValidProject);
310 }
311 }
312 }
313
    /// Download all enabled features (OCI artifacts) into a temp build
    /// directory, write their env files and install wrappers, and generate
    /// the extended Dockerfile that layers them onto the base image.
    ///
    /// On success, stores the inspected base image in `self.root_image` and
    /// the generated build paths/tag in `self.features_build_info`.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        // Variable expansion must have run first; feature refs and paths may
        // otherwise still contain `${...}` placeholders.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the image the features will be layered onto.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // --- Phase 1: Prepare temp directories for the features build ---
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps successive builds in distinct dirs.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // Placeholder build context for builds that need no real context.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // Tag is derived from the Dockerfile path, so each build location
        // maps to a stable tag.
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // A missing features section behaves like an empty feature map.
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // Feature install scripts receive the container/remote users through
        // the builtin env file written below.
        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // --- Phase 2: Fetch each feature's OCI artifact, in install order ---
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature whose options are literally `false` is disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // "<id>_<index>" keeps directories unique even if the same
            // feature id appears more than once.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI registry references are supported here.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // OCI pull sequence: token -> manifest -> first-layer tarball.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // The feature tarball is taken from the manifest's first layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // Every feature artifact must ship a devcontainer-feature.json.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain `${...}` variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the per-feature env file, then a wrapper script that runs
            // the feature's install step with that env.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // BuildKit is assumed for plain docker builds; compose builds use it
        // only when the client reports support.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Base Dockerfile content, if the config references one.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the selected build stage so the extended Dockerfile can build
        // FROM it (via _DEV_CONTAINERS_BASE_IMAGE).
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
571
572 fn generate_dockerfile_extended(
573 &self,
574 container_user: &str,
575 remote_user: &str,
576 dockerfile_content: String,
577 use_buildkit: bool,
578 ) -> String {
579 #[cfg(not(target_os = "windows"))]
580 let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
581 #[cfg(target_os = "windows")]
582 let update_remote_user_uid = false;
583 let feature_layers: String = self
584 .features
585 .iter()
586 .map(|manifest| {
587 manifest.generate_dockerfile_feature_layer(
588 use_buildkit,
589 FEATURES_CONTAINER_TEMP_DEST_FOLDER,
590 )
591 })
592 .collect();
593
594 let container_home_cmd = get_ent_passwd_shell_command(container_user);
595 let remote_home_cmd = get_ent_passwd_shell_command(remote_user);
596
597 let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;
598
599 let feature_content_source_stage = if use_buildkit {
600 "".to_string()
601 } else {
602 "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
603 .to_string()
604 };
605
606 let builtin_env_source_path = if use_buildkit {
607 "./devcontainer-features.builtin.env"
608 } else {
609 "/tmp/build-features/devcontainer-features.builtin.env"
610 };
611
612 let mut extended_dockerfile = format!(
613 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
614
615{dockerfile_content}
616{feature_content_source_stage}
617FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
618USER root
619COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
620RUN chmod -R 0755 /tmp/build-features/
621
622FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
623
624USER root
625
626RUN mkdir -p {dest}
627COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}
628
629RUN \
630echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
631echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env
632
633{feature_layers}
634
635ARG _DEV_CONTAINERS_IMAGE_USER=root
636USER $_DEV_CONTAINERS_IMAGE_USER
637"#
638 );
639
640 // If we're not adding a uid update layer, then we should add env vars to this layer instead
641 if !update_remote_user_uid {
642 extended_dockerfile = format!(
643 r#"{extended_dockerfile}
644# Ensure that /etc/profile does not clobber the existing path
645RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
646"#
647 );
648
649 for feature in &self.features {
650 let container_env_layer = feature.generate_dockerfile_env();
651 extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
652 }
653
654 if let Some(env) = &self.dev_container().container_env {
655 for (key, value) in env {
656 extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
657 }
658 }
659 }
660
661 extended_dockerfile
662 }
663
664 fn build_merged_resources(
665 &self,
666 base_image: DockerInspect,
667 ) -> Result<DockerBuildResources, DevContainerError> {
668 let dev_container = match &self.config {
669 ConfigStatus::Deserialized(_) => {
670 log::error!(
671 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
672 );
673 return Err(DevContainerError::DevContainerParseFailed);
674 }
675 ConfigStatus::VariableParsed(dev_container) => dev_container,
676 };
677 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
678
679 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
680
681 mounts.append(&mut feature_mounts);
682
683 let privileged = dev_container.privileged.unwrap_or(false)
684 || self.features.iter().any(|f| f.privileged());
685
686 let mut entrypoint_script_lines = vec![
687 "echo Container started".to_string(),
688 "trap \"exit 0\" 15".to_string(),
689 ];
690
691 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
692 entrypoint_script_lines.push(entrypoint.clone());
693 }
694 entrypoint_script_lines.append(&mut vec![
695 "exec \"$@\"".to_string(),
696 "while sleep 1 & wait $!; do :; done".to_string(),
697 ]);
698
699 Ok(DockerBuildResources {
700 image: base_image,
701 additional_mounts: mounts,
702 privileged,
703 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
704 })
705 }
706
707 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
708 if let ConfigStatus::Deserialized(_) = &self.config {
709 log::error!(
710 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
711 );
712 return Err(DevContainerError::DevContainerParseFailed);
713 }
714 let dev_container = self.dev_container();
715 match dev_container.build_type() {
716 DevContainerBuildType::Image(base_image) => {
717 let built_docker_image = self.build_docker_image().await?;
718
719 let built_docker_image = self
720 .update_remote_user_uid(built_docker_image, &base_image)
721 .await?;
722
723 let resources = self.build_merged_resources(built_docker_image)?;
724 Ok(DevContainerBuildResources::Docker(resources))
725 }
726 DevContainerBuildType::Dockerfile(_) => {
727 let built_docker_image = self.build_docker_image().await?;
728 let Some(features_build_info) = &self.features_build_info else {
729 log::error!(
730 "Can't attempt to build update UID dockerfile before initial docker build"
731 );
732 return Err(DevContainerError::DevContainerParseFailed);
733 };
734 let built_docker_image = self
735 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
736 .await?;
737
738 let resources = self.build_merged_resources(built_docker_image)?;
739 Ok(DevContainerBuildResources::Docker(resources))
740 }
741 DevContainerBuildType::DockerCompose => {
742 log::debug!("Using docker compose. Building extended compose files");
743 let docker_compose_resources = self.build_and_extend_compose_files().await?;
744
745 return Ok(DevContainerBuildResources::DockerCompose(
746 docker_compose_resources,
747 ));
748 }
749 DevContainerBuildType::None => {
750 return Err(DevContainerError::DevContainerParseFailed);
751 }
752 }
753 }
754
755 async fn run_dev_container(
756 &self,
757 build_resources: DevContainerBuildResources,
758 ) -> Result<DevContainerUp, DevContainerError> {
759 let ConfigStatus::VariableParsed(_) = &self.config else {
760 log::error!(
761 "Variables have not been parsed; cannot proceed with running the dev container"
762 );
763 return Err(DevContainerError::DevContainerParseFailed);
764 };
765 let running_container = match build_resources {
766 DevContainerBuildResources::DockerCompose(resources) => {
767 self.run_docker_compose(resources).await?
768 }
769 DevContainerBuildResources::Docker(resources) => {
770 self.run_docker_image(resources).await?
771 }
772 };
773
774 let remote_user = get_remote_user_from_config(&running_container, self)?;
775 let remote_workspace_folder = get_remote_dir_from_config(
776 &running_container,
777 (&self.local_project_directory.display()).to_string(),
778 )?;
779
780 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
781
782 Ok(DevContainerUp {
783 container_id: running_container.id,
784 remote_user,
785 remote_workspace_folder,
786 extension_ids: self.extension_ids(),
787 remote_env,
788 })
789 }
790
791 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
792 let dev_container = match &self.config {
793 ConfigStatus::Deserialized(_) => {
794 log::error!(
795 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
796 );
797 return Err(DevContainerError::DevContainerParseFailed);
798 }
799 ConfigStatus::VariableParsed(dev_container) => dev_container,
800 };
801 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
802 return Err(DevContainerError::DevContainerParseFailed);
803 };
804 let docker_compose_full_paths = docker_compose_files
805 .iter()
806 .map(|relative| self.config_directory.join(relative))
807 .collect::<Vec<PathBuf>>();
808
809 let Some(config) = self
810 .docker_client
811 .get_docker_compose_config(&docker_compose_full_paths)
812 .await?
813 else {
814 log::error!("Output could not deserialize into DockerComposeConfig");
815 return Err(DevContainerError::DevContainerParseFailed);
816 };
817 Ok(DockerComposeResources {
818 files: docker_compose_full_paths,
819 config,
820 })
821 }
822
    /// Build the compose-based dev container: write a build override compose
    /// file pointing at the extended Dockerfile, run the compose build, then
    /// append a runtime override. Returns the full set of compose files plus
    /// resolved config.
    ///
    /// Requires `features_build_info` to have been prepared first.
    async fn build_and_extend_compose_files(
        &self,
    ) -> Result<DockerComposeResources, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot build and extend compose files: features build info is not yet constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut docker_compose_resources = self.docker_compose_manifest().await?;
        let supports_buildkit = self.docker_client.supports_compose_buildkit();

        let (main_service_name, main_service) =
            find_primary_service(&docker_compose_resources, self)?;
        // NOTE(review): `.map(|b| b.dockerfile.as_ref()).is_some()` is true
        // whenever `build` is present, even with no `dockerfile` key — unlike
        // the `.and_then(...)` check in `get_base_image_from_config`. Confirm
        // which behavior is intended.
        let (built_service_image, built_service_image_tag) = if main_service
            .build
            .as_ref()
            .map(|b| b.dockerfile.as_ref())
            .is_some()
        {
            // Without BuildKit, feature content must be staged through an
            // intermediate image rather than an additional build context.
            if !supports_buildkit {
                self.build_feature_content_image().await?;
            }

            let dockerfile_path = &features_build_info.dockerfile_path;

            let build_args = if !supports_buildkit {
                HashMap::from([
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            } else {
                HashMap::from([
                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            };

            // With BuildKit, feature content arrives as a named build context.
            let additional_contexts = if !supports_buildkit {
                None
            } else {
                Some(HashMap::from([(
                    "dev_containers_feature_content_source".to_string(),
                    features_build_info
                        .features_content_dir
                        .display()
                        .to_string(),
                )]))
            };

            // Override that redirects the primary service's build at the
            // generated extended Dockerfile and tags the result.
            let build_override = DockerComposeConfig {
                name: None,
                services: HashMap::from([(
                    main_service_name.clone(),
                    DockerComposeService {
                        image: Some(features_build_info.image_tag.clone()),
                        entrypoint: None,
                        cap_add: None,
                        security_opt: None,
                        labels: None,
                        build: Some(DockerComposeServiceBuild {
                            // Keep the service's own build context when it has
                            // one; otherwise use the empty placeholder dir.
                            context: Some(
                                main_service
                                    .build
                                    .as_ref()
                                    .and_then(|b| b.context.clone())
                                    .unwrap_or_else(|| {
                                        features_build_info.empty_context_dir.display().to_string()
                                    }),
                            ),
                            dockerfile: Some(dockerfile_path.display().to_string()),
                            target: Some("dev_containers_target_stage".to_string()),
                            args: Some(build_args),
                            additional_contexts,
                        }),
                        volumes: Vec::new(),
                        ..Default::default()
                    },
                )]),
                volumes: HashMap::new(),
            };

            let temp_base = std::env::temp_dir().join("devcontainer-zed");
            let config_location = temp_base.join("docker_compose_build.json");

            // Compose accepts JSON config files; serialize and append it.
            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                log::error!("Error serializing docker compose runtime override: {e}");
                DevContainerError::DevContainerParseFailed
            })?;

            self.fs
                .write(&config_location, config_json.as_bytes())
                .await
                .map_err(|e| {
                    log::error!("Error writing the runtime override file: {e}");
                    DevContainerError::FilesystemError
                })?;

            docker_compose_resources.files.push(config_location);

            self.docker_client
                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                .await?;
            (
                self.docker_client
                    .inspect(&features_build_info.image_tag)
                    .await?,
                &features_build_info.image_tag,
            )
        } else if let Some(image) = &main_service.image {
            if dev_container
                .features
                .as_ref()
                .is_none_or(|features| features.is_empty())
            {
                // No features: the service image can be used as-is.
                (self.docker_client.inspect(image).await?, image)
            } else {
                // Features on top of a plain image: build the extended
                // Dockerfile FROM that image.
                // NOTE(review): this branch duplicates the build-override
                // flow above almost verbatim; consider extracting a helper.
                if !supports_buildkit {
                    self.build_feature_content_image().await?;
                }

                let dockerfile_path = &features_build_info.dockerfile_path;

                let build_args = if !supports_buildkit {
                    HashMap::from([
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                } else {
                    HashMap::from([
                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                };

                let additional_contexts = if !supports_buildkit {
                    None
                } else {
                    Some(HashMap::from([(
                        "dev_containers_feature_content_source".to_string(),
                        features_build_info
                            .features_content_dir
                            .display()
                            .to_string(),
                    )]))
                };

                let build_override = DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        main_service_name.clone(),
                        DockerComposeService {
                            image: Some(features_build_info.image_tag.clone()),
                            entrypoint: None,
                            cap_add: None,
                            security_opt: None,
                            labels: None,
                            build: Some(DockerComposeServiceBuild {
                                context: Some(
                                    features_build_info.empty_context_dir.display().to_string(),
                                ),
                                dockerfile: Some(dockerfile_path.display().to_string()),
                                target: Some("dev_containers_target_stage".to_string()),
                                args: Some(build_args),
                                additional_contexts,
                            }),
                            volumes: Vec::new(),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                };

                let temp_base = std::env::temp_dir().join("devcontainer-zed");
                let config_location = temp_base.join("docker_compose_build.json");

                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                    log::error!("Error serializing docker compose runtime override: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;

                self.fs
                    .write(&config_location, config_json.as_bytes())
                    .await
                    .map_err(|e| {
                        log::error!("Error writing the runtime override file: {e}");
                        DevContainerError::FilesystemError
                    })?;

                docker_compose_resources.files.push(config_location);

                self.docker_client
                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                    .await?;

                (
                    self.docker_client
                        .inspect(&features_build_info.image_tag)
                        .await?,
                    &features_build_info.image_tag,
                )
            }
        } else {
            log::error!("Docker compose must have either image or dockerfile defined");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Remap the remote user's UID in the built image, then merge runtime
        // resources (mounts, privilege, entrypoint) from config and features.
        let built_service_image = self
            .update_remote_user_uid(built_service_image, built_service_image_tag)
            .await?;

        let resources = self.build_merged_resources(built_service_image)?;

        // A `network_mode: service:<name>` setting is detected so the runtime
        // override can account for it.
        let network_mode = main_service.network_mode.as_ref();
        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
        let runtime_override_file = self
            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
            .await?;

        docker_compose_resources.files.push(runtime_override_file);

        Ok(docker_compose_resources)
    }
1064
1065 async fn write_runtime_override_file(
1066 &self,
1067 main_service_name: &str,
1068 network_mode_service: Option<&str>,
1069 resources: DockerBuildResources,
1070 ) -> Result<PathBuf, DevContainerError> {
1071 let config =
1072 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1073 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1074 let config_location = temp_base.join("docker_compose_runtime.json");
1075
1076 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1077 log::error!("Error serializing docker compose runtime override: {e}");
1078 DevContainerError::DevContainerParseFailed
1079 })?;
1080
1081 self.fs
1082 .write(&config_location, config_json.as_bytes())
1083 .await
1084 .map_err(|e| {
1085 log::error!("Error writing the runtime override file: {e}");
1086 DevContainerError::FilesystemError
1087 })?;
1088
1089 Ok(config_location)
1090 }
1091
1092 fn build_runtime_override(
1093 &self,
1094 main_service_name: &str,
1095 network_mode_service: Option<&str>,
1096 resources: DockerBuildResources,
1097 ) -> Result<DockerComposeConfig, DevContainerError> {
1098 let mut runtime_labels = HashMap::new();
1099
1100 if let Some(metadata) = &resources.image.config.labels.metadata {
1101 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1102 log::error!("Error serializing docker image metadata: {e}");
1103 DevContainerError::ContainerNotValid(resources.image.id.clone())
1104 })?;
1105
1106 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1107 }
1108
1109 for (k, v) in self.identifying_labels() {
1110 runtime_labels.insert(k.to_string(), v.to_string());
1111 }
1112
1113 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1114 .additional_mounts
1115 .iter()
1116 .filter_map(|mount| {
1117 if let Some(mount_type) = &mount.mount_type
1118 && mount_type.to_lowercase() == "volume"
1119 && let Some(source) = &mount.source
1120 {
1121 Some((
1122 source.clone(),
1123 DockerComposeVolume {
1124 name: source.clone(),
1125 },
1126 ))
1127 } else {
1128 None
1129 }
1130 })
1131 .collect();
1132
1133 let volumes: Vec<MountDefinition> = resources
1134 .additional_mounts
1135 .iter()
1136 .map(|v| MountDefinition {
1137 source: v.source.clone(),
1138 target: v.target.clone(),
1139 mount_type: v.mount_type.clone(),
1140 })
1141 .collect();
1142
1143 let mut main_service = DockerComposeService {
1144 entrypoint: Some(vec![
1145 "/bin/sh".to_string(),
1146 "-c".to_string(),
1147 resources.entrypoint_script,
1148 "-".to_string(),
1149 ]),
1150 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1151 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1152 labels: Some(runtime_labels),
1153 volumes,
1154 privileged: Some(resources.privileged),
1155 ..Default::default()
1156 };
1157 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1158 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1159 if let Some(forward_ports) = &self.dev_container().forward_ports {
1160 let main_service_ports: Vec<String> = forward_ports
1161 .iter()
1162 .filter_map(|f| match f {
1163 ForwardPort::Number(port) => Some(port.to_string()),
1164 ForwardPort::String(port) => {
1165 let parts: Vec<&str> = port.split(":").collect();
1166 if parts.len() <= 1 {
1167 Some(port.to_string())
1168 } else if parts.len() == 2 {
1169 if parts[0] == main_service_name {
1170 Some(parts[1].to_string())
1171 } else {
1172 None
1173 }
1174 } else {
1175 None
1176 }
1177 }
1178 })
1179 .collect();
1180 for port in main_service_ports {
1181 // If the main service uses a different service's network bridge, append to that service's ports instead
1182 if let Some(network_service_name) = network_mode_service {
1183 if let Some(service) = service_declarations.get_mut(network_service_name) {
1184 service.ports.push(DockerComposeServicePort {
1185 target: port.clone(),
1186 published: port.clone(),
1187 ..Default::default()
1188 });
1189 } else {
1190 service_declarations.insert(
1191 network_service_name.to_string(),
1192 DockerComposeService {
1193 ports: vec![DockerComposeServicePort {
1194 target: port.clone(),
1195 published: port.clone(),
1196 ..Default::default()
1197 }],
1198 ..Default::default()
1199 },
1200 );
1201 }
1202 } else {
1203 main_service.ports.push(DockerComposeServicePort {
1204 target: port.clone(),
1205 published: port.clone(),
1206 ..Default::default()
1207 });
1208 }
1209 }
1210 let other_service_ports: Vec<(&str, &str)> = forward_ports
1211 .iter()
1212 .filter_map(|f| match f {
1213 ForwardPort::Number(_) => None,
1214 ForwardPort::String(port) => {
1215 let parts: Vec<&str> = port.split(":").collect();
1216 if parts.len() != 2 {
1217 None
1218 } else {
1219 if parts[0] == main_service_name {
1220 None
1221 } else {
1222 Some((parts[0], parts[1]))
1223 }
1224 }
1225 }
1226 })
1227 .collect();
1228 for (service_name, port) in other_service_ports {
1229 if let Some(service) = service_declarations.get_mut(service_name) {
1230 service.ports.push(DockerComposeServicePort {
1231 target: port.to_string(),
1232 published: port.to_string(),
1233 ..Default::default()
1234 });
1235 } else {
1236 service_declarations.insert(
1237 service_name.to_string(),
1238 DockerComposeService {
1239 ports: vec![DockerComposeServicePort {
1240 target: port.to_string(),
1241 published: port.to_string(),
1242 ..Default::default()
1243 }],
1244 ..Default::default()
1245 },
1246 );
1247 }
1248 }
1249 }
1250
1251 service_declarations.insert(main_service_name.to_string(), main_service);
1252 let new_docker_compose_config = DockerComposeConfig {
1253 name: None,
1254 services: service_declarations,
1255 volumes: config_volumes,
1256 };
1257
1258 Ok(new_docker_compose_config)
1259 }
1260
1261 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1262 let dev_container = match &self.config {
1263 ConfigStatus::Deserialized(_) => {
1264 log::error!(
1265 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1266 );
1267 return Err(DevContainerError::DevContainerParseFailed);
1268 }
1269 ConfigStatus::VariableParsed(dev_container) => dev_container,
1270 };
1271
1272 match dev_container.build_type() {
1273 DevContainerBuildType::Image(image_tag) => {
1274 let base_image = self.docker_client.inspect(&image_tag).await?;
1275 if dev_container
1276 .features
1277 .as_ref()
1278 .is_none_or(|features| features.is_empty())
1279 {
1280 log::debug!("No features to add. Using base image");
1281 return Ok(base_image);
1282 }
1283 }
1284 DevContainerBuildType::Dockerfile(_) => {}
1285 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1286 return Err(DevContainerError::DevContainerParseFailed);
1287 }
1288 };
1289
1290 let mut command = self.create_docker_build()?;
1291
1292 let output = self
1293 .command_runner
1294 .run_command(&mut command)
1295 .await
1296 .map_err(|e| {
1297 log::error!("Error building docker image: {e}");
1298 DevContainerError::CommandFailed(command.get_program().display().to_string())
1299 })?;
1300
1301 if !output.status.success() {
1302 let stderr = String::from_utf8_lossy(&output.stderr);
1303 log::error!("docker buildx build failed: {stderr}");
1304 return Err(DevContainerError::CommandFailed(
1305 command.get_program().display().to_string(),
1306 ));
1307 }
1308
1309 // After a successful build, inspect the newly tagged image to get its metadata
1310 let Some(features_build_info) = &self.features_build_info else {
1311 log::error!("Features build info expected, but not created");
1312 return Err(DevContainerError::DevContainerParseFailed);
1313 };
1314 let image = self
1315 .docker_client
1316 .inspect(&features_build_info.image_tag)
1317 .await?;
1318
1319 Ok(image)
1320 }
1321
    /// No-op on Windows: the image is returned unchanged. Presumably UID/GID
    /// remapping only applies to unix hosts — confirm against the
    /// non-windows implementation below.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1330 #[cfg(not(target_os = "windows"))]
1331 async fn update_remote_user_uid(
1332 &self,
1333 image: DockerInspect,
1334 base_image: &str,
1335 ) -> Result<DockerInspect, DevContainerError> {
1336 let dev_container = self.dev_container();
1337
1338 let Some(features_build_info) = &self.features_build_info else {
1339 return Ok(image);
1340 };
1341
1342 // updateRemoteUserUID defaults to true per the devcontainers spec
1343 if dev_container.update_remote_user_uid == Some(false) {
1344 return Ok(image);
1345 }
1346
1347 let remote_user = get_remote_user_from_config(&image, self)?;
1348 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1349 return Ok(image);
1350 }
1351
1352 let image_user = image
1353 .config
1354 .image_user
1355 .as_deref()
1356 .unwrap_or("root")
1357 .to_string();
1358
1359 let host_uid = Command::new("id")
1360 .arg("-u")
1361 .output()
1362 .await
1363 .map_err(|e| {
1364 log::error!("Failed to get host UID: {e}");
1365 DevContainerError::CommandFailed("id -u".to_string())
1366 })
1367 .and_then(|output| {
1368 String::from_utf8_lossy(&output.stdout)
1369 .trim()
1370 .parse::<u32>()
1371 .map_err(|e| {
1372 log::error!("Failed to parse host UID: {e}");
1373 DevContainerError::CommandFailed("id -u".to_string())
1374 })
1375 })?;
1376
1377 let host_gid = Command::new("id")
1378 .arg("-g")
1379 .output()
1380 .await
1381 .map_err(|e| {
1382 log::error!("Failed to get host GID: {e}");
1383 DevContainerError::CommandFailed("id -g".to_string())
1384 })
1385 .and_then(|output| {
1386 String::from_utf8_lossy(&output.stdout)
1387 .trim()
1388 .parse::<u32>()
1389 .map_err(|e| {
1390 log::error!("Failed to parse host GID: {e}");
1391 DevContainerError::CommandFailed("id -g".to_string())
1392 })
1393 })?;
1394
1395 let dockerfile_content = self.generate_update_uid_dockerfile();
1396
1397 let dockerfile_path = features_build_info
1398 .features_content_dir
1399 .join("updateUID.Dockerfile");
1400 self.fs
1401 .write(&dockerfile_path, dockerfile_content.as_bytes())
1402 .await
1403 .map_err(|e| {
1404 log::error!("Failed to write updateUID Dockerfile: {e}");
1405 DevContainerError::FilesystemError
1406 })?;
1407
1408 let updated_image_tag = features_build_info.image_tag.clone();
1409
1410 let mut command = Command::new(self.docker_client.docker_cli());
1411 command.args(["build"]);
1412 command.args(["-f", &dockerfile_path.display().to_string()]);
1413 command.args(["-t", &updated_image_tag]);
1414 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1415 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1416 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1417 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1418 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1419 command.arg(features_build_info.empty_context_dir.display().to_string());
1420
1421 let output = self
1422 .command_runner
1423 .run_command(&mut command)
1424 .await
1425 .map_err(|e| {
1426 log::error!("Error building UID update image: {e}");
1427 DevContainerError::CommandFailed(command.get_program().display().to_string())
1428 })?;
1429
1430 if !output.status.success() {
1431 let stderr = String::from_utf8_lossy(&output.stderr);
1432 log::error!("UID update build failed: {stderr}");
1433 return Err(DevContainerError::CommandFailed(
1434 command.get_program().display().to_string(),
1435 ));
1436 }
1437
1438 self.docker_client.inspect(&updated_image_tag).await
1439 }
1440
1441 #[cfg(not(target_os = "windows"))]
1442 fn generate_update_uid_dockerfile(&self) -> String {
1443 let mut dockerfile = r#"ARG BASE_IMAGE
1444FROM $BASE_IMAGE
1445
1446USER root
1447
1448ARG REMOTE_USER
1449ARG NEW_UID
1450ARG NEW_GID
1451SHELL ["/bin/sh", "-c"]
1452RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1453 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1454 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1455 if [ -z "$OLD_UID" ]; then \
1456 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1457 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1458 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1459 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1460 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1461 else \
1462 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1463 FREE_GID=65532; \
1464 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1465 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1466 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1467 fi; \
1468 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1469 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1470 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1471 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1472 fi; \
1473 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1474 fi;
1475
1476ARG IMAGE_USER
1477USER $IMAGE_USER
1478
1479# Ensure that /etc/profile does not clobber the existing path
1480RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1481"#.to_string();
1482 for feature in &self.features {
1483 let container_env_layer = feature.generate_dockerfile_env();
1484 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1485 }
1486
1487 if let Some(env) = &self.dev_container().container_env {
1488 for (key, value) in env {
1489 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1490 }
1491 }
1492 dockerfile
1493 }
1494
1495 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1496 let Some(features_build_info) = &self.features_build_info else {
1497 log::error!("Features build info not available for building feature content image");
1498 return Err(DevContainerError::DevContainerParseFailed);
1499 };
1500 let features_content_dir = &features_build_info.features_content_dir;
1501
1502 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1503 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1504
1505 self.fs
1506 .write(&dockerfile_path, dockerfile_content.as_bytes())
1507 .await
1508 .map_err(|e| {
1509 log::error!("Failed to write feature content Dockerfile: {e}");
1510 DevContainerError::FilesystemError
1511 })?;
1512
1513 let mut command = Command::new(self.docker_client.docker_cli());
1514 command.args([
1515 "build",
1516 "-t",
1517 "dev_container_feature_content_temp",
1518 "-f",
1519 &dockerfile_path.display().to_string(),
1520 &features_content_dir.display().to_string(),
1521 ]);
1522
1523 let output = self
1524 .command_runner
1525 .run_command(&mut command)
1526 .await
1527 .map_err(|e| {
1528 log::error!("Error building feature content image: {e}");
1529 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1530 })?;
1531
1532 if !output.status.success() {
1533 let stderr = String::from_utf8_lossy(&output.stderr);
1534 log::error!("Feature content image build failed: {stderr}");
1535 return Err(DevContainerError::CommandFailed(
1536 self.docker_client.docker_cli(),
1537 ));
1538 }
1539
1540 Ok(())
1541 }
1542
    /// Assembles the `docker buildx build` command that layers the
    /// configured features on top of the base image / Dockerfile, tagging
    /// the result with the features image tag.
    ///
    /// Requires the config to have gone through variable expansion and the
    /// features build info to have been constructed; errors otherwise.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // Forward the root image's configured user (defaulting to "root") as
        // a build arg for the generated Dockerfile.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // User-supplied build args from the devcontainer config's `build.args`.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
            command.arg(self.config_directory.display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1631
1632 async fn run_docker_compose(
1633 &self,
1634 resources: DockerComposeResources,
1635 ) -> Result<DockerInspect, DevContainerError> {
1636 let mut command = Command::new(self.docker_client.docker_cli());
1637 command.args(&["compose", "--project-name", &self.project_name()]);
1638 for docker_compose_file in resources.files {
1639 command.args(&["-f", &docker_compose_file.display().to_string()]);
1640 }
1641 command.args(&["up", "-d"]);
1642
1643 let output = self
1644 .command_runner
1645 .run_command(&mut command)
1646 .await
1647 .map_err(|e| {
1648 log::error!("Error running docker compose up: {e}");
1649 DevContainerError::CommandFailed(command.get_program().display().to_string())
1650 })?;
1651
1652 if !output.status.success() {
1653 let stderr = String::from_utf8_lossy(&output.stderr);
1654 log::error!("Non-success status from docker compose up: {}", stderr);
1655 return Err(DevContainerError::CommandFailed(
1656 command.get_program().display().to_string(),
1657 ));
1658 }
1659
1660 if let Some(docker_ps) = self.check_for_existing_container().await? {
1661 log::debug!("Found newly created dev container");
1662 return self.docker_client.inspect(&docker_ps.id).await;
1663 }
1664
1665 log::error!("Could not find existing container after docker compose up");
1666
1667 Err(DevContainerError::DevContainerParseFailed)
1668 }
1669
1670 async fn run_docker_image(
1671 &self,
1672 build_resources: DockerBuildResources,
1673 ) -> Result<DockerInspect, DevContainerError> {
1674 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1675
1676 let output = self
1677 .command_runner
1678 .run_command(&mut docker_run_command)
1679 .await
1680 .map_err(|e| {
1681 log::error!("Error running docker run: {e}");
1682 DevContainerError::CommandFailed(
1683 docker_run_command.get_program().display().to_string(),
1684 )
1685 })?;
1686
1687 if !output.status.success() {
1688 let std_err = String::from_utf8_lossy(&output.stderr);
1689 log::error!("Non-success status from docker run. StdErr: {std_err}");
1690 return Err(DevContainerError::CommandFailed(
1691 docker_run_command.get_program().display().to_string(),
1692 ));
1693 }
1694
1695 log::debug!("Checking for container that was started");
1696 let Some(docker_ps) = self.check_for_existing_container().await? else {
1697 log::error!("Could not locate container just created");
1698 return Err(DevContainerError::DevContainerParseFailed);
1699 };
1700 self.docker_client.inspect(&docker_ps.id).await
1701 }
1702
    /// The absolute path of the project on the host, as a display string.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1706 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1707 self.local_project_directory
1708 .file_name()
1709 .map(|f| f.display().to_string())
1710 .ok_or(DevContainerError::DevContainerParseFailed)
1711 }
1712
1713 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1714 self.dev_container()
1715 .workspace_folder
1716 .as_ref()
1717 .map(|folder| PathBuf::from(folder))
1718 .or(Some(
1719 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1720 ))
1721 .ok_or(DevContainerError::DevContainerParseFailed)
1722 }
1723 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1724 self.remote_workspace_folder().and_then(|f| {
1725 f.file_name()
1726 .map(|file_name| file_name.display().to_string())
1727 .ok_or(DevContainerError::DevContainerParseFailed)
1728 })
1729 }
1730
1731 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1732 if let Some(mount) = &self.dev_container().workspace_mount {
1733 return Ok(mount.clone());
1734 }
1735 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1736 return Err(DevContainerError::DevContainerParseFailed);
1737 };
1738
1739 Ok(MountDefinition {
1740 source: Some(self.local_workspace_folder()),
1741 target: format!("/workspaces/{}", project_directory_name.display()),
1742 mount_type: None,
1743 })
1744 }
1745
    /// Assembles the `docker run` command that starts the dev container:
    /// mounts, identifying labels, forwarded ports, and the generated
    /// entrypoint script, honoring any user-supplied `runArgs`.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // User-supplied runArgs are passed through verbatim, before any of
        // our own flags.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        for arg in run_args {
            command.arg(arg);
        }

        // Appends `arg` only when the user's runArgs did not already supply
        // a flag starting with `arg_name`, so explicit user choices win.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        // Podman-specific defaults, unless the user overrode them.
        if &docker_cli == "podman" {
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        // Detached, with the workspace mount plus any additional mounts.
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Identifying labels let us find this container again later.
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Carry the image's devcontainer metadata forward as a container
        // label so later inspections can recover it.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Numeric forwardPorts entries and all appPort entries are published
        // (string-form forwardPorts are compose-only service:port pairs).
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Override the entrypoint to run the generated script via /bin/sh.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1840
1841 fn extension_ids(&self) -> Vec<String> {
1842 self.dev_container()
1843 .customizations
1844 .as_ref()
1845 .map(|c| c.zed.extensions.clone())
1846 .unwrap_or_default()
1847 }
1848
    /// Full "up" pipeline for a fresh container: host-side initialize
    /// commands, feature/dockerfile downloads, image build, container start,
    /// and in-container lifecycle scripts.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        // initializeCommand hooks run on the host before anything is built.
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: brand-new container, so the creation-time lifecycle
        // scripts run in addition to the attach-time ones.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1862
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// docker exec. When `new_container` is true the creation-time phases
    /// (onCreate, updateContent, postCreate, postStart) run first;
    /// postAttach runs in all cases.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // All scripts execute from the remote workspace folder.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        // Creation-time scripts only apply the first time a container is made.
        if new_container {
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs whether the container is new or pre-existing.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1950
1951 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1952 let ConfigStatus::VariableParsed(config) = &self.config else {
1953 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1954 return Err(DevContainerError::DevContainerParseFailed);
1955 };
1956
1957 if let Some(initialize_command) = &config.initialize_command {
1958 log::debug!("Running initialize command");
1959 initialize_command
1960 .run(&self.command_runner, &self.local_project_directory)
1961 .await
1962 } else {
1963 log::warn!("No initialize command found");
1964 Ok(())
1965 }
1966 }
1967
1968 async fn check_for_existing_devcontainer(
1969 &self,
1970 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1971 if let Some(docker_ps) = self.check_for_existing_container().await? {
1972 log::debug!("Dev container already found. Proceeding with it");
1973
1974 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1975
1976 if !docker_inspect.is_running() {
1977 log::debug!("Container not running. Will attempt to start, and then proceed");
1978 self.docker_client.start_container(&docker_ps.id).await?;
1979 }
1980
1981 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1982
1983 let remote_folder = get_remote_dir_from_config(
1984 &docker_inspect,
1985 (&self.local_project_directory.display()).to_string(),
1986 )?;
1987
1988 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1989
1990 let dev_container_up = DevContainerUp {
1991 container_id: docker_ps.id,
1992 remote_user: remote_user,
1993 remote_workspace_folder: remote_folder,
1994 extension_ids: self.extension_ids(),
1995 remote_env,
1996 };
1997
1998 self.run_remote_scripts(&dev_container_up, false).await?;
1999
2000 Ok(Some(dev_container_up))
2001 } else {
2002 log::debug!("Existing container not found.");
2003
2004 Ok(None)
2005 }
2006 }
2007
2008 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2009 self.docker_client
2010 .find_process_by_filters(
2011 self.identifying_labels()
2012 .iter()
2013 .map(|(k, v)| format!("label={k}={v}"))
2014 .collect(),
2015 )
2016 .await
2017 }
2018
2019 fn project_name(&self) -> String {
2020 if let Some(name) = &self.dev_container().name {
2021 safe_id_lower(name)
2022 } else {
2023 let alternate_name = &self
2024 .local_workspace_base_name()
2025 .unwrap_or(self.local_workspace_folder());
2026 safe_id_lower(alternate_name)
2027 }
2028 }
2029
    /// Loads the project's Dockerfile and expands `${KEY}` references using
    /// build args from the devcontainer config plus inline `ARG KEY=value`
    /// defaults, returning the rewritten contents.
    ///
    /// Devcontainer build args take precedence over inline `ARG` defaults.
    /// Fails if this is an image-type config (no Dockerfile) or the file
    /// cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // `ARG` defaults collected so far; later lines may reference them.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches `KEY=` at line start or after whitespace, so a single `ARG`
        // line can declare several KEY=value pairs.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // Each value spans from the end of this `KEY=` match to
                    // the start of the next one (or end of line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2088}
2089
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// NOTE(review): presumably `None` when the base comes from a Dockerfile
    /// build rather than a pulled image — confirm against the build path.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2108
2109pub(crate) async fn read_devcontainer_configuration(
2110 config: DevContainerConfig,
2111 context: &DevContainerContext,
2112 environment: HashMap<String, String>,
2113) -> Result<DevContainer, DevContainerError> {
2114 let docker = if context.use_podman {
2115 Docker::new("podman").await
2116 } else {
2117 Docker::new("docker").await
2118 };
2119 let mut dev_container = DevContainerManifest::new(
2120 context,
2121 environment,
2122 Arc::new(docker),
2123 Arc::new(DefaultCommandRunner::new()),
2124 config,
2125 &context.project_directory.as_ref(),
2126 )
2127 .await?;
2128 dev_container.parse_nonremote_vars()?;
2129 Ok(dev_container.dev_container().clone())
2130}
2131
2132pub(crate) async fn spawn_dev_container(
2133 context: &DevContainerContext,
2134 environment: HashMap<String, String>,
2135 config: DevContainerConfig,
2136 local_project_path: &Path,
2137) -> Result<DevContainerUp, DevContainerError> {
2138 let docker = if context.use_podman {
2139 Docker::new("podman").await
2140 } else {
2141 Docker::new("docker").await
2142 };
2143 let mut devcontainer_manifest = DevContainerManifest::new(
2144 context,
2145 environment,
2146 Arc::new(docker),
2147 Arc::new(DefaultCommandRunner::new()),
2148 config,
2149 local_project_path,
2150 )
2151 .await?;
2152
2153 devcontainer_manifest.parse_nonremote_vars()?;
2154
2155 log::debug!("Checking for existing container");
2156 if let Some(devcontainer) = devcontainer_manifest
2157 .check_for_existing_devcontainer()
2158 .await?
2159 {
2160 Ok(devcontainer)
2161 } else {
2162 log::debug!("Existing container not found. Building");
2163
2164 devcontainer_manifest.build_and_run().await
2165 }
2166}
2167
/// Inputs for constructing the `docker run` command of a non-compose dev container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data of the image the container will be started from.
    image: DockerInspect,
    /// Extra mounts to apply beyond the workspace bind mount.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should be started with elevated privileges.
    privileged: bool,
    /// Shell script used as the container's keep-alive entrypoint.
    entrypoint_script: String,
}
2175
/// Build inputs resolved for a dev container, split by orchestration type.
#[derive(Debug)]
enum DevContainerBuildResources {
    /// Project is brought up via `docker compose` from these files/config.
    DockerCompose(DockerComposeResources),
    /// Project is run as a single container from these resources.
    Docker(DockerBuildResources),
}
2181
2182fn find_primary_service(
2183 docker_compose: &DockerComposeResources,
2184 devcontainer: &DevContainerManifest,
2185) -> Result<(String, DockerComposeService), DevContainerError> {
2186 let Some(service_name) = &devcontainer.dev_container().service else {
2187 return Err(DevContainerError::DevContainerParseFailed);
2188 };
2189
2190 match docker_compose.config.services.get(service_name) {
2191 Some(service) => Ok((service_name.clone(), service.clone())),
2192 None => Err(DevContainerError::DevContainerParseFailed),
2193 }
2194}
2195
2196/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2197/// `dockerfile` is relative to the build `context`, and `context` is relative to
2198/// the compose file's directory.
2199fn resolve_compose_dockerfile(
2200 compose_file: &Path,
2201 context: Option<&str>,
2202 dockerfile: &str,
2203) -> Option<PathBuf> {
2204 let dockerfile = PathBuf::from(dockerfile);
2205 if dockerfile.is_absolute() {
2206 return Some(dockerfile);
2207 }
2208 let compose_dir = compose_file.parent()?;
2209 let context_dir = match context {
2210 Some(ctx) => {
2211 let ctx = PathBuf::from(ctx);
2212 if ctx.is_absolute() {
2213 ctx
2214 } else {
2215 normalize_path(&compose_dir.join(ctx))
2216 }
2217 }
2218 None => compose_dir.to_path_buf(),
2219 };
2220 Some(context_dir.join(dockerfile))
2221}
2222
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): presumably the COPY destination in the generated
/// Dockerfile.extended — confirm against the features build step.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2226
/// Escapes regex special characters in a string.
fn escape_regex_chars(input: &str) -> String {
    // Worst case every char is escaped, so reserve twice the input length.
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, ch| {
            if SPECIAL.contains(ch) {
                escaped.push('\\');
            }
            escaped.push(ch);
            escaped
        })
}
2238
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Strip any version suffix: a digest after `@`, or a tag after the last
    // `:` when that colon comes after the final path separator.
    let without_version = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short id is the final path segment.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2262
2263/// Generates a shell command that looks up a user's passwd entry.
2264///
2265/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2266/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2267fn get_ent_passwd_shell_command(user: &str) -> String {
2268 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2269 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2270 format!(
2271 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2272 shell = escaped_for_shell,
2273 re = escaped_for_regex,
2274 )
2275}
2276
2277/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2278///
2279/// Features listed in the override come first (in the specified order), followed
2280/// by any remaining features sorted lexicographically by their full reference ID.
2281fn resolve_feature_order<'a>(
2282 features: &'a HashMap<String, FeatureOptions>,
2283 override_order: &Option<Vec<String>>,
2284) -> Vec<(&'a String, &'a FeatureOptions)> {
2285 if let Some(order) = override_order {
2286 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2287 for ordered_id in order {
2288 if let Some((key, options)) = features.get_key_value(ordered_id) {
2289 ordered.push((key, options));
2290 }
2291 }
2292 let mut remaining: Vec<_> = features
2293 .iter()
2294 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2295 .collect();
2296 remaining.sort_by_key(|(id, _)| id.as_str());
2297 ordered.extend(remaining);
2298 ordered
2299 } else {
2300 let mut entries: Vec<_> = features.iter().collect();
2301 entries.sort_by_key(|(id, _)| id.as_str());
2302 entries
2303 }
2304}
2305
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Shell-quote everything interpolated into the script so feature ids and
    // option values cannot break out of the generated commands.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line for the banner output below.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // `{{` / `}}` below are literal braces in the emitted shell script.
    // The script sources the builtin and per-feature env files with `set -a`
    // so feature options become exported variables for install.sh.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2364
2365fn dockerfile_inject_alias(
2366 dockerfile_content: &str,
2367 alias: &str,
2368 build_target: Option<String>,
2369) -> String {
2370 match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2371 Some(target) => format!(
2372 r#"{dockerfile_content}
2373FROM {target} AS {alias}"#
2374 ),
2375 None => dockerfile_content.to_string(),
2376 }
2377}
2378
/// Finds the image referenced by the last matching `FROM` line of a Dockerfile.
///
/// With `target = None`, the last `FROM` line wins (the final build stage).
/// With `target = Some(stage)`, only `FROM <image> AS <stage>` lines are
/// considered; stage names and the `FROM`/`AS` keywords are compared
/// case-insensitively, per Dockerfile rules.
///
/// Returns `None` when no matching `FROM` line names an image.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    let mut image = None;
    for line in dockerfile_contents.lines() {
        // Tokenize on any whitespace so tabs and repeated spaces don't produce
        // empty tokens, and require an exact `FROM` instruction token.
        let tokens: Vec<&str> = line.split_whitespace().collect();
        let Some(first) = tokens.first() else { continue };
        if !first.eq_ignore_ascii_case("FROM") {
            continue;
        }
        let matches_target = match target {
            Some(target) => {
                tokens.len() >= 3
                    && tokens[tokens.len() - 2].eq_ignore_ascii_case("as")
                    && tokens[tokens.len() - 1].eq_ignore_ascii_case(target)
            }
            None => true,
        };
        if matches_target {
            // The image is the first argument after `FROM`, skipping flags
            // such as `--platform=...`. Later matches overwrite earlier ones
            // so the last matching stage wins.
            image = tokens
                .iter()
                .skip(1)
                .find(|token| !token.starts_with("--"))
                .map(|token| token.to_string());
        }
    }
    image
}
2404
2405fn get_remote_user_from_config(
2406 docker_config: &DockerInspect,
2407 devcontainer: &DevContainerManifest,
2408) -> Result<String, DevContainerError> {
2409 if let DevContainer {
2410 remote_user: Some(user),
2411 ..
2412 } = &devcontainer.dev_container()
2413 {
2414 return Ok(user.clone());
2415 }
2416 if let Some(metadata) = &docker_config.config.labels.metadata {
2417 for metadatum in metadata {
2418 if let Some(remote_user) = metadatum.get("remoteUser") {
2419 if let Some(remote_user_str) = remote_user.as_str() {
2420 return Ok(remote_user_str.to_string());
2421 }
2422 }
2423 }
2424 }
2425 if let Some(image_user) = &docker_config.config.image_user {
2426 if !image_user.is_empty() {
2427 return Ok(image_user.to_string());
2428 }
2429 }
2430 Ok("root".to_string())
2431}
2432
2433// This should come from spec - see the docs
2434fn get_container_user_from_config(
2435 docker_config: &DockerInspect,
2436 devcontainer: &DevContainerManifest,
2437) -> Result<String, DevContainerError> {
2438 if let Some(user) = &devcontainer.dev_container().container_user {
2439 return Ok(user.to_string());
2440 }
2441 if let Some(metadata) = &docker_config.config.labels.metadata {
2442 for metadatum in metadata {
2443 if let Some(container_user) = metadatum.get("containerUser") {
2444 if let Some(container_user_str) = container_user.as_str() {
2445 return Ok(container_user_str.to_string());
2446 }
2447 }
2448 }
2449 }
2450 if let Some(image_user) = &docker_config.config.image_user {
2451 return Ok(image_user.to_string());
2452 }
2453
2454 Ok("root".to_string())
2455}
2456
2457#[cfg(test)]
2458mod test {
2459 use std::{
2460 collections::HashMap,
2461 ffi::OsStr,
2462 path::{Path, PathBuf},
2463 process::{ExitStatus, Output},
2464 sync::{Arc, Mutex},
2465 };
2466
2467 use async_trait::async_trait;
2468 use fs::{FakeFs, Fs};
2469 use gpui::{AppContext, TestAppContext};
2470 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2471 use project::{
2472 ProjectEnvironment,
2473 worktree_store::{WorktreeIdCounter, WorktreeStore},
2474 };
2475 use serde_json_lenient::Value;
2476 use util::{command::Command, paths::SanitizedPath};
2477
2478 #[cfg(not(target_os = "windows"))]
2479 use crate::docker::DockerComposeServicePort;
2480 use crate::{
2481 DevContainerConfig, DevContainerContext,
2482 command_json::CommandRunner,
2483 devcontainer_api::DevContainerError,
2484 devcontainer_json::MountDefinition,
2485 devcontainer_manifest::{
2486 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2487 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2488 image_from_dockerfile, resolve_compose_dockerfile,
2489 },
2490 docker::{
2491 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2492 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2493 DockerPs,
2494 },
2495 oci::TokenResponse,
2496 };
    // Canonical fake local project path shared by all tests in this module.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2498
2499 async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2500 let buffer = futures::io::Cursor::new(Vec::new());
2501 let mut builder = async_tar::Builder::new(buffer);
2502 for (file_name, content) in content {
2503 if content.is_empty() {
2504 let mut header = async_tar::Header::new_gnu();
2505 header.set_size(0);
2506 header.set_mode(0o755);
2507 header.set_entry_type(async_tar::EntryType::Directory);
2508 header.set_cksum();
2509 builder
2510 .append_data(&mut header, file_name, &[] as &[u8])
2511 .await
2512 .unwrap();
2513 } else {
2514 let data = content.as_bytes();
2515 let mut header = async_tar::Header::new_gnu();
2516 header.set_size(data.len() as u64);
2517 header.set_mode(0o755);
2518 header.set_entry_type(async_tar::EntryType::Regular);
2519 header.set_cksum();
2520 builder
2521 .append_data(&mut header, file_name, data)
2522 .await
2523 .unwrap();
2524 }
2525 }
2526 let buffer = builder.into_inner().await.unwrap();
2527 buffer.into_inner()
2528 }
2529
2530 fn test_project_filename() -> String {
2531 PathBuf::from(TEST_PROJECT_PATH)
2532 .file_name()
2533 .expect("is valid")
2534 .display()
2535 .to_string()
2536 }
2537
    /// Writes `devcontainer_contents` to `.devcontainer/devcontainer.json`
    /// under the fake project root and returns the default config locator.
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2550
    /// Bundle of the fakes backing a manifest under test, returned so
    /// assertions can inspect recorded calls.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // NOTE(review): not read by any test in view; presumably retained so
        // the fake client outlives the manifest — confirm before removing.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2557
    /// Convenience wrapper around [`init_devcontainer_manifest`] using fresh
    /// default fakes (fs, http client, docker, command runner) and an empty
    /// host environment.
    async fn init_default_devcontainer_manifest(
        cx: &mut TestAppContext,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let fs = FakeFs::new(cx.executor());
        let http_client = fake_http_client();
        let command_runner = Arc::new(TestCommandRunner::new());
        let docker = Arc::new(FakeDocker::new());
        let environment = HashMap::new();

        init_devcontainer_manifest(
            cx,
            fs,
            http_client,
            docker,
            command_runner,
            environment,
            devcontainer_contents,
        )
        .await
    }
2579
    /// Wires a [`DevContainerManifest`] to the provided fakes, returning both
    /// the manifest and the fakes (as [`TestDependencies`]) for inspection.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // A real (but empty) worktree store + project environment are needed
        // because the manifest resolves its environment through them.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2622
    /// `remoteUser` from devcontainer.json takes precedence over the image's
    /// metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
        "#,
        )
        .await
        .unwrap();

        // The metadata label advertises a different user; it must be ignored.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2662
    /// With no `remoteUser` in devcontainer.json, the image's devcontainer
    /// metadata label supplies the user.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2691
    /// Covers tag (`:1`), digest (`@sha256:...`), bare, and local-path
    /// feature references.
    #[test]
    fn should_extract_feature_id_from_references() {
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
            "aws-cli"
        );
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/go"),
            "go"
        );
        assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
        assert_eq!(extract_feature_id("./myFeature"), "myFeature");
        assert_eq!(
            extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
            "rust"
        );
    }
2709
2710 #[gpui::test]
2711 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2712 let mut metadata = HashMap::new();
2713 metadata.insert(
2714 "remoteUser".to_string(),
2715 serde_json_lenient::Value::String("vsCode".to_string()),
2716 );
2717
2718 let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2719 cx,
2720 r#"{
2721 "name": "TODO"
2722 }"#,
2723 )
2724 .await
2725 .unwrap();
2726 let build_resources = DockerBuildResources {
2727 image: DockerInspect {
2728 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2729 config: DockerInspectConfig {
2730 labels: DockerConfigLabels { metadata: None },
2731 image_user: None,
2732 env: Vec::new(),
2733 },
2734 mounts: None,
2735 state: None,
2736 },
2737 additional_mounts: vec![],
2738 privileged: false,
2739 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2740 };
2741 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2742
2743 assert!(docker_run_command.is_ok());
2744 let docker_run_command = docker_run_command.expect("ok");
2745
2746 assert_eq!(docker_run_command.get_program(), "docker");
2747 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2748 .join(".devcontainer")
2749 .join("devcontainer.json");
2750 let expected_config_file_label = expected_config_file_label.display();
2751 assert_eq!(
2752 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2753 vec![
2754 OsStr::new("run"),
2755 OsStr::new("--sig-proxy=false"),
2756 OsStr::new("-d"),
2757 OsStr::new("--mount"),
2758 OsStr::new(
2759 "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2760 ),
2761 OsStr::new("-l"),
2762 OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2763 OsStr::new("-l"),
2764 OsStr::new(&format!(
2765 "devcontainer.config_file={expected_config_file_label}"
2766 )),
2767 OsStr::new("--entrypoint"),
2768 OsStr::new("/bin/sh"),
2769 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2770 OsStr::new("-c"),
2771 OsStr::new(
2772 "
2773 echo Container started
2774 trap \"exit 0\" 15
2775 exec \"$@\"
2776 while sleep 1 & wait $!; do :; done
2777 "
2778 .trim()
2779 ),
2780 OsStr::new("-"),
2781 ]
2782 )
2783 }
2784
    /// `find_primary_service` must fail when `service` is missing or unknown,
    /// and resolve the service when the compose config defines it.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2844
    /// End-to-end check of non-remote `${...}` substitution when the config
    /// relies on the default workspace mount (`/workspaces/<basename>`),
    /// including `localEnv` lookups from the provided host environment.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
        "#;
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2957
    /// Same substitution check as the default-mount test, but with an explicit
    /// `workspaceMount`/`workspaceFolder`, which must drive the container-side
    /// folder variables instead of the `/workspaces/<basename>` default.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
        // These are some external comments. serde_lenient should handle them
        {
            // These are some internal comments
            "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
            "name": "myDevContainer-${devcontainerId}",
            "remoteUser": "root",
            "remoteEnv": {
                "DEVCONTAINER_ID": "${devcontainerId}",
                "MYVAR2": "myvarothervalue",
                "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

            },
            "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
            "workspaceFolder": "/workspace/customfolder"
        }
        "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
3044
3045 // updateRemoteUserUID is treated as false in Windows, so this test will fail
3046 // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
3047 #[cfg(not(target_os = "windows"))]
3048 #[gpui::test]
3049 async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
3050 cx.executor().allow_parking();
3051 env_logger::try_init().ok();
3052 let given_devcontainer_contents = r#"
3053 /*---------------------------------------------------------------------------------------------
3054 * Copyright (c) Microsoft Corporation. All rights reserved.
3055 * Licensed under the MIT License. See License.txt in the project root for license information.
3056 *--------------------------------------------------------------------------------------------*/
3057 {
3058 "name": "cli-${devcontainerId}",
3059 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
3060 "build": {
3061 "dockerfile": "Dockerfile",
3062 "args": {
3063 "VARIANT": "18-bookworm",
3064 "FOO": "bar",
3065 },
3066 },
3067 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
3068 "workspaceFolder": "/workspace2",
3069 "mounts": [
3070 // Keep command history across instances
3071 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
3072 ],
3073
3074 "runArgs": [
3075 "--cap-add=SYS_PTRACE",
3076 "--sig-proxy=true",
3077 ],
3078
3079 "forwardPorts": [
3080 8082,
3081 8083,
3082 ],
3083 "appPort": [
3084 8084,
3085 "8085:8086",
3086 ],
3087
3088 "containerEnv": {
3089 "VARIABLE_VALUE": "value",
3090 },
3091
3092 "initializeCommand": "touch IAM.md",
3093
3094 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3095
3096 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3097
3098 "postCreateCommand": {
3099 "yarn": "yarn install",
3100 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3101 },
3102
3103 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3104
3105 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3106
3107 "remoteUser": "node",
3108
3109 "remoteEnv": {
3110 "PATH": "${containerEnv:PATH}:/some/other/path",
3111 "OTHER_ENV": "other_env_value"
3112 },
3113
3114 "features": {
3115 "ghcr.io/devcontainers/features/docker-in-docker:2": {
3116 "moby": false,
3117 },
3118 "ghcr.io/devcontainers/features/go:1": {},
3119 },
3120
3121 "customizations": {
3122 "vscode": {
3123 "extensions": [
3124 "dbaeumer.vscode-eslint",
3125 "GitHub.vscode-pull-request-github",
3126 ],
3127 },
3128 "zed": {
3129 "extensions": ["vue", "ruby"],
3130 },
3131 "codespaces": {
3132 "repositories": {
3133 "devcontainers/features": {
3134 "permissions": {
3135 "contents": "write",
3136 "workflows": "write",
3137 },
3138 },
3139 },
3140 },
3141 },
3142 }
3143 "#;
3144
3145 let (test_dependencies, mut devcontainer_manifest) =
3146 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3147 .await
3148 .unwrap();
3149
3150 test_dependencies
3151 .fs
3152 .atomic_write(
3153 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3154 r#"
3155# Copyright (c) Microsoft Corporation. All rights reserved.
3156# Licensed under the MIT License. See License.txt in the project root for license information.
3157ARG VARIANT="16-bullseye"
3158FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3159
3160RUN mkdir -p /workspaces && chown node:node /workspaces
3161
3162ARG USERNAME=node
3163USER $USERNAME
3164
3165# Save command line history
3166RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3167&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3168&& mkdir -p /home/$USERNAME/commandhistory \
3169&& touch /home/$USERNAME/commandhistory/.bash_history \
3170&& chown -R $USERNAME /home/$USERNAME/commandhistory
3171 "#.trim().to_string(),
3172 )
3173 .await
3174 .unwrap();
3175
3176 devcontainer_manifest.parse_nonremote_vars().unwrap();
3177
3178 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3179
3180 assert_eq!(
3181 devcontainer_up.extension_ids,
3182 vec!["vue".to_string(), "ruby".to_string()]
3183 );
3184
3185 let files = test_dependencies.fs.files();
3186 let feature_dockerfile = files
3187 .iter()
3188 .find(|f| {
3189 f.file_name()
3190 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3191 })
3192 .expect("to be found");
3193 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3194 assert_eq!(
3195 &feature_dockerfile,
3196 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3197
3198# Copyright (c) Microsoft Corporation. All rights reserved.
3199# Licensed under the MIT License. See License.txt in the project root for license information.
3200ARG VARIANT="16-bullseye"
3201FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3202
3203RUN mkdir -p /workspaces && chown node:node /workspaces
3204
3205ARG USERNAME=node
3206USER $USERNAME
3207
3208# Save command line history
3209RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3210&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3211&& mkdir -p /home/$USERNAME/commandhistory \
3212&& touch /home/$USERNAME/commandhistory/.bash_history \
3213&& chown -R $USERNAME /home/$USERNAME/commandhistory
3214FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3215
3216FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3217USER root
3218COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3219RUN chmod -R 0755 /tmp/build-features/
3220
3221FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3222
3223USER root
3224
3225RUN mkdir -p /tmp/dev-container-features
3226COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3227
3228RUN \
3229echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3230echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3231
3232
3233RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3234cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3235&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3236&& cd /tmp/dev-container-features/docker-in-docker_0 \
3237&& chmod +x ./devcontainer-features-install.sh \
3238&& ./devcontainer-features-install.sh \
3239&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3240
3241RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3242cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3243&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3244&& cd /tmp/dev-container-features/go_1 \
3245&& chmod +x ./devcontainer-features-install.sh \
3246&& ./devcontainer-features-install.sh \
3247&& rm -rf /tmp/dev-container-features/go_1
3248
3249
3250ARG _DEV_CONTAINERS_IMAGE_USER=root
3251USER $_DEV_CONTAINERS_IMAGE_USER
3252"#
3253 );
3254
3255 let uid_dockerfile = files
3256 .iter()
3257 .find(|f| {
3258 f.file_name()
3259 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3260 })
3261 .expect("to be found");
3262 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3263
3264 assert_eq!(
3265 &uid_dockerfile,
3266 r#"ARG BASE_IMAGE
3267FROM $BASE_IMAGE
3268
3269USER root
3270
3271ARG REMOTE_USER
3272ARG NEW_UID
3273ARG NEW_GID
3274SHELL ["/bin/sh", "-c"]
3275RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3276 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3277 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3278 if [ -z "$OLD_UID" ]; then \
3279 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3280 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3281 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3282 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3283 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3284 else \
3285 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3286 FREE_GID=65532; \
3287 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3288 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3289 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3290 fi; \
3291 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3292 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3293 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3294 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3295 fi; \
3296 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3297 fi;
3298
3299ARG IMAGE_USER
3300USER $IMAGE_USER
3301
3302# Ensure that /etc/profile does not clobber the existing path
3303RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3304
3305ENV DOCKER_BUILDKIT=1
3306
3307ENV GOPATH=/go
3308ENV GOROOT=/usr/local/go
3309ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3310ENV VARIABLE_VALUE=value
3311"#
3312 );
3313
3314 let golang_install_wrapper = files
3315 .iter()
3316 .find(|f| {
3317 f.file_name()
3318 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3319 && f.to_str().is_some_and(|s| s.contains("/go_"))
3320 })
3321 .expect("to be found");
3322 let golang_install_wrapper = test_dependencies
3323 .fs
3324 .load(golang_install_wrapper)
3325 .await
3326 .unwrap();
3327 assert_eq!(
3328 &golang_install_wrapper,
3329 r#"#!/bin/sh
3330set -e
3331
3332on_exit () {
3333 [ $? -eq 0 ] && exit
3334 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3335}
3336
3337trap on_exit EXIT
3338
3339echo ===========================================================================
3340echo 'Feature : go'
3341echo 'Id : ghcr.io/devcontainers/features/go:1'
3342echo 'Options :'
3343echo ' GOLANGCILINTVERSION=latest
3344 VERSION=latest'
3345echo ===========================================================================
3346
3347set -a
3348. ../devcontainer-features.builtin.env
3349. ./devcontainer-features.env
3350set +a
3351
3352chmod +x ./install.sh
3353./install.sh
3354"#
3355 );
3356
3357 let docker_commands = test_dependencies
3358 .command_runner
3359 .commands_by_program("docker");
3360
3361 let docker_run_command = docker_commands
3362 .iter()
3363 .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3364 .expect("found");
3365
3366 assert_eq!(
3367 docker_run_command.args,
3368 vec![
3369 "run".to_string(),
3370 "--privileged".to_string(),
3371 "--cap-add=SYS_PTRACE".to_string(),
3372 "--sig-proxy=true".to_string(),
3373 "-d".to_string(),
3374 "--mount".to_string(),
3375 "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3376 "--mount".to_string(),
3377 "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3378 "--mount".to_string(),
3379 "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3380 "-l".to_string(),
3381 "devcontainer.local_folder=/path/to/local/project".to_string(),
3382 "-l".to_string(),
3383 "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3384 "-l".to_string(),
3385 "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3386 "-p".to_string(),
3387 "8082:8082".to_string(),
3388 "-p".to_string(),
3389 "8083:8083".to_string(),
3390 "-p".to_string(),
3391 "8084:8084".to_string(),
3392 "-p".to_string(),
3393 "8085:8086".to_string(),
3394 "--entrypoint".to_string(),
3395 "/bin/sh".to_string(),
3396 "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3397 "-c".to_string(),
3398 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3399 "-".to_string()
3400 ]
3401 );
3402
3403 let docker_exec_commands = test_dependencies
3404 .docker
3405 .exec_commands_recorded
3406 .lock()
3407 .unwrap();
3408
3409 assert!(docker_exec_commands.iter().all(|exec| {
3410 exec.env
3411 == HashMap::from([
3412 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3413 (
3414 "PATH".to_string(),
3415 "/initial/path:/some/other/path".to_string(),
3416 ),
3417 ])
3418 }))
3419 }
3420
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    //
    // End-to-end docker-compose flow: the "app" service builds from a local
    // Dockerfile, two features are layered on, and the manifest emits build- and
    // runtime-override compose files. Verifies Dockerfile.extended,
    // updateUID.Dockerfile, the preserved build context, and the runtime override.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture: "app" builds from a local Dockerfile and shares the
        // "db" service's network (so forwarded ports land on "db").
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile referenced by the app service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Dockerfile.extended layers both features (aws-cli, docker-in-docker)
        // on top of the user's Dockerfile; remote user here is "vscode".
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // UID-update Dockerfile; unlike the dockerfile test there are no
        // feature ENV lines appended beyond DOCKER_BUILDKIT.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The build override must keep the original build context ("." relative
        // to the compose file) rather than rewriting it.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // Runtime override: entrypoint wrapper/labels/dind volume go on "app";
        // port publications go on "db" because app uses network_mode: service:db.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3742
3743 #[test]
3744 fn test_resolve_compose_dockerfile() {
3745 let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3746
3747 // Bug case (#53473): context ".." with relative dockerfile
3748 assert_eq!(
3749 resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3750 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3751 );
3752
3753 // Compose path containing ".." (as docker_compose_manifest() produces)
3754 assert_eq!(
3755 resolve_compose_dockerfile(
3756 Path::new("/project/.devcontainer/../docker-compose.yml"),
3757 Some("."),
3758 "docker/Dockerfile",
3759 ),
3760 Some(PathBuf::from("/project/docker/Dockerfile")),
3761 );
3762
3763 // Absolute dockerfile returned as-is
3764 assert_eq!(
3765 resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3766 Some(PathBuf::from("/absolute/Dockerfile")),
3767 );
3768
3769 // Absolute context used directly
3770 assert_eq!(
3771 resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3772 Some(PathBuf::from("/abs/context/Dockerfile")),
3773 );
3774
3775 // No context defaults to compose file's directory
3776 assert_eq!(
3777 resolve_compose_dockerfile(compose, None, "Dockerfile"),
3778 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3779 );
3780 }
3781
3782 #[gpui::test]
3783 async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
3784 cx.executor().allow_parking();
3785 env_logger::try_init().ok();
3786
3787 let given_devcontainer_contents = r#"
3788 {
3789 "name": "Test",
3790 "dockerComposeFile": "docker-compose-context-parent.yml",
3791 "service": "app",
3792 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
3793 }
3794 "#;
3795 let (_, mut devcontainer_manifest) =
3796 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3797 .await
3798 .unwrap();
3799
3800 devcontainer_manifest.parse_nonremote_vars().unwrap();
3801
3802 let expected = PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile");
3803 assert_eq!(
3804 devcontainer_manifest.dockerfile_location().await,
3805 Some(expected)
3806 );
3807 }
3808
3809 #[gpui::test]
3810 async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3811 cx: &mut TestAppContext,
3812 ) {
3813 cx.executor().allow_parking();
3814 env_logger::try_init().ok();
3815 let given_devcontainer_contents = r#"
3816 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3817 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3818 {
3819 "features": {
3820 "ghcr.io/devcontainers/features/aws-cli:1": {},
3821 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3822 },
3823 "name": "Rust and PostgreSQL",
3824 "dockerComposeFile": "docker-compose.yml",
3825 "service": "app",
3826 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3827
3828 // Features to add to the dev container. More info: https://containers.dev/features.
3829 // "features": {},
3830
3831 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3832 "forwardPorts": [
3833 8083,
3834 "db:5432",
3835 "db:1234",
3836 ],
3837 "updateRemoteUserUID": false,
3838 "appPort": "8084",
3839
3840 // Use 'postCreateCommand' to run commands after the container is created.
3841 // "postCreateCommand": "rustc --version",
3842
3843 // Configure tool-specific properties.
3844 // "customizations": {},
3845
3846 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3847 // "remoteUser": "root"
3848 }
3849 "#;
3850 let (test_dependencies, mut devcontainer_manifest) =
3851 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3852 .await
3853 .unwrap();
3854
3855 test_dependencies
3856 .fs
3857 .atomic_write(
3858 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3859 r#"
3860version: '3.8'
3861
3862volumes:
3863postgres-data:
3864
3865services:
3866app:
3867 build:
3868 context: .
3869 dockerfile: Dockerfile
3870 env_file:
3871 # Ensure that the variables in .env match the same variables in devcontainer.json
3872 - .env
3873
3874 volumes:
3875 - ../..:/workspaces:cached
3876
3877 # Overrides default command so things don't shut down after the process ends.
3878 command: sleep infinity
3879
3880 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3881 network_mode: service:db
3882
3883 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3884 # (Adding the "ports" property to this file will not forward from a Codespace.)
3885
3886db:
3887 image: postgres:14.1
3888 restart: unless-stopped
3889 volumes:
3890 - postgres-data:/var/lib/postgresql/data
3891 env_file:
3892 # Ensure that the variables in .env match the same variables in devcontainer.json
3893 - .env
3894
3895 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3896 # (Adding the "ports" property to this file will not forward from a Codespace.)
3897 "#.trim().to_string(),
3898 )
3899 .await
3900 .unwrap();
3901
3902 test_dependencies.fs.atomic_write(
3903 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3904 r#"
3905FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3906
3907# Include lld linker to improve build times either by using environment variable
3908# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3909RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3910&& apt-get -y install clang lld \
3911&& apt-get autoremove -y && apt-get clean -y
3912 "#.trim().to_string()).await.unwrap();
3913
3914 devcontainer_manifest.parse_nonremote_vars().unwrap();
3915
3916 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3917
3918 let files = test_dependencies.fs.files();
3919 let feature_dockerfile = files
3920 .iter()
3921 .find(|f| {
3922 f.file_name()
3923 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3924 })
3925 .expect("to be found");
3926 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3927 assert_eq!(
3928 &feature_dockerfile,
3929 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3930
3931FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3932
3933# Include lld linker to improve build times either by using environment variable
3934# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3935RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3936&& apt-get -y install clang lld \
3937&& apt-get autoremove -y && apt-get clean -y
3938FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3939
3940FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3941USER root
3942COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3943RUN chmod -R 0755 /tmp/build-features/
3944
3945FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3946
3947USER root
3948
3949RUN mkdir -p /tmp/dev-container-features
3950COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3951
3952RUN \
3953echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3954echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3955
3956
3957RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3958cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3959&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3960&& cd /tmp/dev-container-features/aws-cli_0 \
3961&& chmod +x ./devcontainer-features-install.sh \
3962&& ./devcontainer-features-install.sh \
3963&& rm -rf /tmp/dev-container-features/aws-cli_0
3964
3965RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3966cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3967&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3968&& cd /tmp/dev-container-features/docker-in-docker_1 \
3969&& chmod +x ./devcontainer-features-install.sh \
3970&& ./devcontainer-features-install.sh \
3971&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3972
3973
3974ARG _DEV_CONTAINERS_IMAGE_USER=root
3975USER $_DEV_CONTAINERS_IMAGE_USER
3976
3977# Ensure that /etc/profile does not clobber the existing path
3978RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3979
3980
3981ENV DOCKER_BUILDKIT=1
3982"#
3983 );
3984 }
3985
    // Compose-based devcontainer driven through podman. `FakeDocker::set_podman(true)`
    // switches the engine, and the generated "Dockerfile.extended" must then install
    // features via `COPY --from=dev_containers_feature_content_source` stages instead
    // of the BuildKit `RUN --mount=type=bind` form used for docker, and an
    // "updateUID.Dockerfile" must also be written out.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json under test: compose file + service, two OCI features.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            // "forwardPorts": [5432],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let mut fake_docker = FakeDocker::new();
        // The engine being podman is the whole point of this test.
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // NOTE(review): this compose YAML is largely flat (service keys not nested);
        // presumably the fakes never YAML-parse it — confirm before reusing elsewhere.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
    context: .
    dockerfile: Dockerfile
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

volumes:
    - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
    - postgres-data:/var/lib/postgresql/data
env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // The feature build must have emitted a Dockerfile.extended somewhere in the fake fs.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Podman variant: features are copied in via `COPY --chown ... --from=...`
        // stages rather than BuildKit bind mounts, and no `ENV DOCKER_BUILDKIT=1`
        // / profile-PATH fixups are appended at the end.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The UID-remap Dockerfile must also be generated for this path.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4212
    // Dockerfile-based devcontainer with `updateRemoteUserUID: false`.
    // Covers: the Zed extension ids surfaced from `customizations.zed`, the
    // feature-augmented Dockerfile.extended (docker/BuildKit variant with
    // `RUN --mount=type=bind` feature installs), the generated per-feature
    // install wrapper script, that a `docker run` command was issued, and that
    // every recorded exec carries `remoteEnv` merged over the container's PATH.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        /*---------------------------------------------------------------------------------------------
         * Copyright (c) Microsoft Corporation. All rights reserved.
         * Licensed under the MIT License. See License.txt in the project root for license information.
         *--------------------------------------------------------------------------------------------*/
        {
            "name": "cli-${devcontainerId}",
            // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VARIANT": "18-bookworm",
                    "FOO": "bar",
                },
                "target": "development",
            },
            "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
            "workspaceFolder": "/workspace2",
            "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
            ],

            "forwardPorts": [
                8082,
                8083,
            ],
            "appPort": "8084",
            "updateRemoteUserUID": false,

            "containerEnv": {
                "VARIABLE_VALUE": "value",
            },

            "initializeCommand": "touch IAM.md",

            "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

            "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

            "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
            },

            "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

            "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

            "remoteUser": "node",

            "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
            },

            "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                    "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
            },

            "customizations": {
                "vscode": {
                    "extensions": [
                        "dbaeumer.vscode-eslint",
                        "GitHub.vscode-pull-request-github",
                    ],
                },
                "zed": {
                    "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                    "repositories": {
                        "devcontainers/features": {
                            "permissions": {
                                "contents": "write",
                                "workflows": "write",
                            },
                        },
                    },
                },
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Multi-stage Dockerfile; the devcontainer build targets the `development` stage.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the `customizations.zed` extensions are surfaced; vscode/codespaces
        // customizations are ignored.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Docker/BuildKit variant: bind-mount feature installs, `_REMOTE_USER_HOME`
        // derived from remoteUser "node", trailing ENV lines contributed by the
        // go feature plus `containerEnv` (VARIABLE_VALUE).
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Locate the install wrapper generated specifically for the go feature
        // (path component contains "go_").
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        // NOTE(review): clippy's `get_first` would prefer `c.args.first()` here.
        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // Every exec must see remoteEnv with `${containerEnv:PATH}` expanded
        // against the container's initial PATH.
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4496
    // Simplest configuration: a bare "image" with no features and no compose file.
    // Only the updateUID.Dockerfile should be generated (default
    // updateRemoteUserUID behavior), without any feature/BuildKit additions.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "image": "test_image:latest",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Unlike the podman/docker-in-docker case, no trailing
        // `ENV DOCKER_BUILDKIT=1` is appended here.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4571
    // docker-compose configuration whose service uses a plain image (no build
    // section, no features): the updateUID.Dockerfile must still be generated,
    // identical to the plain-image case.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "dockerComposeFile": "docker-compose-plain.yml",
            "service": "app",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
  app:
    image: test_image:latest
    command: sleep infinity
    volumes:
      - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4665
    // Base-image resolution without an explicit build target: `image_from_dockerfile`
    // must follow the ARG chain (`REGISTRY`/`REPOSITORY`/`VERSION` -> `IMAGE`),
    // apply the devcontainer.json build arg VERSION=1.22 over the Dockerfile
    // default (1.21), and skip the earlier `dontgrabme` stage.
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        // No `build.target` in the config, so the target argument resolves to None.
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
    }
4725
    // Base-image resolution with `build.target: "development"`: the image is taken
    // from the `development` stage's FROM (DEV_IMAGE, pinned to :latest) rather
    // than the last stage (`production`, which would use VERSION).
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                },
                "target": "development"
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest

FROM ${DEV_IMAGE} AS development
FROM ${IMAGE} AS production
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
    }
4788
    // Pins the ARG-expansion semantics of `expanded_dockerfile_content()`:
    // - forward references stay literal (INVALID_FORWARD_REFERENCE keeps ${OTP_VERSION});
    // - multiple ARGs on one line (FOO/BAR) both participate in later expansions;
    // - devcontainer.json build args override Dockerfile defaults when expanding
    //   later references (ELIXIR_VERSION -> 1.21) and inject values for args the
    //   Dockerfile never defines itself (JSON_ARG);
    // - JSON-looking values with quotes/braces survive substitution intact.
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Note: the ARG *definition* lines keep their original right-hand sides
        // (e.g. ELIXIR_VERSION=1.20.0-rc.4); only the *references* are expanded.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
                "#
            .trim()
        )
    }
4861
    // TODO(review): empty stub — should cover aliasing a Dockerfile that
    // already declares its own `AS` build-stage aliases.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4864
    // TODO(review): empty stub — should cover aliasing a Dockerfile whose
    // stages carry no `AS` aliases.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4867
    // TODO(review): empty stub — should cover aliasing when the devcontainer
    // build config specifies an explicit target stage.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4870
    /// Arguments captured from a single `run_docker_exec` call on
    /// [`FakeDocker`], so tests can assert on what would have been executed.
    /// Underscore-prefixed fields are recorded but not currently read by any
    /// assertion.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment passed to the exec'd command; asserted on by tests.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4878
    /// In-memory stand-in for a Docker daemon used by these tests; serves
    /// canned responses via its `DockerClient` impl and records exec calls.
    pub(crate) struct FakeDocker {
        // Every `run_docker_exec` invocation, in call order.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as podman (`docker_cli`) and
        // disables compose buildkit support.
        podman: bool,
        // Feeds `supports_compose_buildkit`.
        has_buildx: bool,
    }
4884
4885 impl FakeDocker {
4886 pub(crate) fn new() -> Self {
4887 Self {
4888 podman: false,
4889 has_buildx: true,
4890 exec_commands_recorded: Mutex::new(Vec::new()),
4891 }
4892 }
4893 #[cfg(not(target_os = "windows"))]
4894 fn set_podman(&mut self, podman: bool) {
4895 self.podman = podman;
4896 }
4897 }
4898
    // Test double for `DockerClient`: serves canned inspect/compose payloads
    // keyed off well-known image names, container-id prefixes, and compose
    // file paths used throughout these tests, and records exec invocations.
    // Unrecognized inputs fall through to `DockerNotAvailable`.
    #[async_trait]
    impl DockerClient for FakeDocker {
        // Parameter type `&String` is dictated by the `DockerClient` trait.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Canned typescript-node base image: metadata label sets remoteUser "node".
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Canned rust base image: remoteUser "vscode".
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Any id starting with "cli_" — presumably containers built from
            // the "cli-..." named devcontainer fixtures; carries a non-empty
            // PATH env entry so env-merging behavior can be asserted.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Matches the id returned by `find_process_by_filters` below; the
            // only canned response that reports a bind mount.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Ids starting with "rust_a-": same metadata shape as the rust
            // base image (remoteUser "vscode"), different digest.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Image referenced by the docker-compose-plain.yml fixture below.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            // Unknown id: behave as if Docker were unreachable.
            Err(DevContainerError::DockerNotAvailable)
        }
        // Returns a canned parsed compose config for exactly three known
        // single-file inputs; anything else errors.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            // Two-service compose file: an "app" built from a Dockerfile with
            // a bind-mounted workspace and network_mode piggybacking on "db",
            // plus a "db" postgres service with a named volume and env file.
            if config_files.len() == 1
                && config_files.get(0)
                == Some(&PathBuf::from(
                    "/path/to/local/project/.devcontainer/docker-compose.yml",
                ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                    target: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // Single service built with a build context one directory above
            // the compose file (".." + ".devcontainer/Dockerfile").
            if config_files.len() == 1
                && config_files.get(0)
                == Some(&PathBuf::from(
                    "/path/to/local/project/.devcontainer/docker-compose-context-parent.yml",
                ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            build: Some(DockerComposeServiceBuild {
                                context: Some("..".to_string()),
                                dockerfile: Some(".devcontainer/Dockerfile".to_string()),
                                args: None,
                                additional_contexts: None,
                                target: None,
                            }),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                }));
            }
            // Simplest case: one image-based service with a sleep command and
            // no build section.
            if config_files.len() == 1
                && config_files.get(0)
                == Some(&PathBuf::from(
                    "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
                ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        // Compose builds always "succeed" without doing anything.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        // Records the exec request for later assertions instead of running it.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        // Starting containers is unsupported in the fake.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        // Always reports one running container, whose id `inspect` recognizes.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        // Buildkit support requires docker (not podman) and buildx.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman && self.has_buildx
        }
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
5163
    /// A single command captured by [`TestCommandRunner`]: the program name
    /// plus its arguments, both stringified for easy assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5169
    /// `CommandRunner` test double that records every command instead of
    /// executing it; see `commands_by_program` for querying the log.
    pub(crate) struct TestCommandRunner {
        // Commands in the order `run_command` received them.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5173
5174 impl TestCommandRunner {
5175 fn new() -> Self {
5176 Self {
5177 commands_recorded: Mutex::new(Vec::new()),
5178 }
5179 }
5180
5181 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5182 let record = self.commands_recorded.lock().expect("poisoned");
5183 record
5184 .iter()
5185 .filter(|r| r.program == program)
5186 .map(|r| r.clone())
5187 .collect()
5188 }
5189 }
5190
5191 #[async_trait]
5192 impl CommandRunner for TestCommandRunner {
5193 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5194 let mut record = self.commands_recorded.lock().expect("poisoned");
5195
5196 record.push(TestCommand {
5197 program: command.get_program().display().to_string(),
5198 args: command
5199 .get_args()
5200 .map(|a| a.display().to_string())
5201 .collect(),
5202 });
5203
5204 Ok(Output {
5205 status: ExitStatus::default(),
5206 stdout: vec![],
5207 stderr: vec![],
5208 })
5209 }
5210 }
5211
5212 fn fake_http_client() -> Arc<dyn HttpClient> {
5213 FakeHttpClient::create(|request| async move {
5214 let (parts, _body) = request.into_parts();
5215 if parts.uri.path() == "/token" {
5216 let token_response = TokenResponse {
5217 token: "token".to_string(),
5218 };
5219 return Ok(http::Response::builder()
5220 .status(200)
5221 .body(http_client::AsyncBody::from(
5222 serde_json_lenient::to_string(&token_response).unwrap(),
5223 ))
5224 .unwrap());
5225 }
5226
5227 // OCI specific things
5228 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5229 let response = r#"
5230 {
5231 "schemaVersion": 2,
5232 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5233 "config": {
5234 "mediaType": "application/vnd.devcontainers",
5235 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5236 "size": 2
5237 },
5238 "layers": [
5239 {
5240 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5241 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5242 "size": 59392,
5243 "annotations": {
5244 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5245 }
5246 }
5247 ],
5248 "annotations": {
5249 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5250 "com.github.package.type": "devcontainer_feature"
5251 }
5252 }
5253 "#;
5254 return Ok(http::Response::builder()
5255 .status(200)
5256 .body(http_client::AsyncBody::from(response))
5257 .unwrap());
5258 }
5259
5260 if parts.uri.path()
5261 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5262 {
5263 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5268 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5269 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5270 ```
5271 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5272 ```
5273 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5274
5275
5276 ## OS Support
5277
5278 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5279
5280 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5281
5282 `bash` is required to execute the `install.sh` script."#),
5283 ("./README.md", r#"
5284 # Docker (Docker-in-Docker) (docker-in-docker)
5285
5286 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5287
5288 ## Example Usage
5289
5290 ```json
5291 "features": {
5292 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5293 }
5294 ```
5295
5296 ## Options
5297
5298 | Options Id | Description | Type | Default Value |
5299 |-----|-----|-----|-----|
5300 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5301 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5302 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5303 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5304 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5305 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5306 | installDockerBuildx | Install Docker Buildx | boolean | true |
5307 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5308 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5309
5310 ## Customizations
5311
5312 ### VS Code Extensions
5313
5314 - `ms-azuretools.vscode-containers`
5315
5316 ## Limitations
5317
5318 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5319 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5320 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5321 ```
5322 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5323 ```
5324 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5325
5326
5327 ## OS Support
5328
5329 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5330
5331 `bash` is required to execute the `install.sh` script.
5332
5333
5334 ---
5335
5336 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
5337 ("./devcontainer-feature.json", r#"
5338 {
5339 "id": "docker-in-docker",
5340 "version": "2.16.1",
5341 "name": "Docker (Docker-in-Docker)",
5342 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5343 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5344 "options": {
5345 "version": {
5346 "type": "string",
5347 "proposals": [
5348 "latest",
5349 "none",
5350 "20.10"
5351 ],
5352 "default": "latest",
5353 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5354 },
5355 "moby": {
5356 "type": "boolean",
5357 "default": true,
5358 "description": "Install OSS Moby build instead of Docker CE"
5359 },
5360 "mobyBuildxVersion": {
5361 "type": "string",
5362 "default": "latest",
5363 "description": "Install a specific version of moby-buildx when using Moby"
5364 },
5365 "dockerDashComposeVersion": {
5366 "type": "string",
5367 "enum": [
5368 "none",
5369 "v1",
5370 "v2"
5371 ],
5372 "default": "v2",
5373 "description": "Default version of Docker Compose (v1, v2 or none)"
5374 },
5375 "azureDnsAutoDetection": {
5376 "type": "boolean",
5377 "default": true,
5378 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5379 },
5380 "dockerDefaultAddressPool": {
5381 "type": "string",
5382 "default": "",
5383 "proposals": [],
5384 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5385 },
5386 "installDockerBuildx": {
5387 "type": "boolean",
5388 "default": true,
5389 "description": "Install Docker Buildx"
5390 },
5391 "installDockerComposeSwitch": {
5392 "type": "boolean",
5393 "default": false,
5394 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5395 },
5396 "disableIp6tables": {
5397 "type": "boolean",
5398 "default": false,
5399 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5400 }
5401 },
5402 "entrypoint": "/usr/local/share/docker-init.sh",
5403 "privileged": true,
5404 "containerEnv": {
5405 "DOCKER_BUILDKIT": "1"
5406 },
5407 "customizations": {
5408 "vscode": {
5409 "extensions": [
5410 "ms-azuretools.vscode-containers"
5411 ],
5412 "settings": {
5413 "github.copilot.chat.codeGeneration.instructions": [
5414 {
5415 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5416 }
5417 ]
5418 }
5419 }
5420 },
5421 "mounts": [
5422 {
5423 "source": "dind-var-lib-docker-${devcontainerId}",
5424 "target": "/var/lib/docker",
5425 "type": "volume"
5426 }
5427 ],
5428 "installsAfter": [
5429 "ghcr.io/devcontainers/features/common-utils"
5430 ]
5431 }"#),
5432 ("./install.sh", r#"
5433 #!/usr/bin/env bash
5434 #-------------------------------------------------------------------------------------------------------------
5435 # Copyright (c) Microsoft Corporation. All rights reserved.
5436 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5437 #-------------------------------------------------------------------------------------------------------------
5438 #
5439 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5440 # Maintainer: The Dev Container spec maintainers
5441
5442
5443 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5444 USE_MOBY="${MOBY:-"true"}"
5445 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5446 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5447 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5448 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5449 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5450 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5451 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5452 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5453 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5454 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5455 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5456 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5457
5458 # Default: Exit on any failure.
5459 set -e
5460
5461 # Clean up
5462 rm -rf /var/lib/apt/lists/*
5463
5464 # Setup STDERR.
5465 err() {
5466 echo "(!) $*" >&2
5467 }
5468
5469 if [ "$(id -u)" -ne 0 ]; then
5470 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5471 exit 1
5472 fi
5473
5474 ###################
5475 # Helper Functions
5476 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5477 ###################
5478
5479 # Determine the appropriate non-root user
5480 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5481 USERNAME=""
5482 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5483 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5484 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5485 USERNAME=${CURRENT_USER}
5486 break
5487 fi
5488 done
5489 if [ "${USERNAME}" = "" ]; then
5490 USERNAME=root
5491 fi
5492 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5493 USERNAME=root
5494 fi
5495
5496 # Package manager update function
5497 pkg_mgr_update() {
5498 case ${ADJUSTED_ID} in
5499 debian)
5500 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5501 echo "Running apt-get update..."
5502 apt-get update -y
5503 fi
5504 ;;
5505 rhel)
5506 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5507 cache_check_dir="/var/cache/yum"
5508 else
5509 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5510 fi
5511 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5512 echo "Running ${PKG_MGR_CMD} makecache ..."
5513 ${PKG_MGR_CMD} makecache
5514 fi
5515 ;;
5516 esac
5517 }
5518
5519 # Checks if packages are installed and installs them if not
5520 check_packages() {
5521 case ${ADJUSTED_ID} in
5522 debian)
5523 if ! dpkg -s "$@" > /dev/null 2>&1; then
5524 pkg_mgr_update
5525 apt-get -y install --no-install-recommends "$@"
5526 fi
5527 ;;
5528 rhel)
5529 if ! rpm -q "$@" > /dev/null 2>&1; then
5530 pkg_mgr_update
5531 ${PKG_MGR_CMD} -y install "$@"
5532 fi
5533 ;;
5534 esac
5535 }
5536
5537 # Figure out correct version of a three part version number is not passed
5538 find_version_from_git_tags() {
5539 local variable_name=$1
5540 local requested_version=${!variable_name}
5541 if [ "${requested_version}" = "none" ]; then return; fi
5542 local repository=$2
5543 local prefix=${3:-"tags/v"}
5544 local separator=${4:-"."}
5545 local last_part_optional=${5:-"false"}
5546 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5547 local escaped_separator=${separator//./\\.}
5548 local last_part
5549 if [ "${last_part_optional}" = "true" ]; then
5550 last_part="(${escaped_separator}[0-9]+)?"
5551 else
5552 last_part="${escaped_separator}[0-9]+"
5553 fi
5554 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5555 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5556 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5557 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5558 else
5559 set +e
5560 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5561 set -e
5562 fi
5563 fi
5564 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5565 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5566 exit 1
5567 fi
5568 echo "${variable_name}=${!variable_name}"
5569 }
5570
5571 # Use semver logic to decrement a version number then look for the closest match
5572 find_prev_version_from_git_tags() {
5573 local variable_name=$1
5574 local current_version=${!variable_name}
5575 local repository=$2
5576 # Normally a "v" is used before the version number, but support alternate cases
5577 local prefix=${3:-"tags/v"}
5578 # Some repositories use "_" instead of "." for version number part separation, support that
5579 local separator=${4:-"."}
5580 # Some tools release versions that omit the last digit (e.g. go)
5581 local last_part_optional=${5:-"false"}
5582 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5583 local version_suffix_regex=$6
5584 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5585 set +e
5586 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5587 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5588 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5589
5590 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5591 ((major=major-1))
5592 declare -g ${variable_name}="${major}"
5593 # Look for latest version from previous major release
5594 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5595 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5596 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5597 ((minor=minor-1))
5598 declare -g ${variable_name}="${major}.${minor}"
5599 # Look for latest version from previous minor release
5600 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5601 else
5602 ((breakfix=breakfix-1))
5603 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5604 declare -g ${variable_name}="${major}.${minor}"
5605 else
5606 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5607 fi
5608 fi
5609 set -e
5610 }
5611
5612 # Function to fetch the version released prior to the latest version
5613 get_previous_version() {
5614 local url=$1
5615 local repo_url=$2
5616 local variable_name=$3
5617 prev_version=${!variable_name}
5618
5619 output=$(curl -s "$repo_url");
5620 if echo "$output" | jq -e 'type == "object"' > /dev/null; then
5621 message=$(echo "$output" | jq -r '.message')
5622
5623 if [[ $message == "API rate limit exceeded"* ]]; then
5624 echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
5625 echo -e "\nAttempting to find latest version using GitHub tags."
5626 find_prev_version_from_git_tags prev_version "$url" "tags/v"
5627 declare -g ${variable_name}="${prev_version}"
5628 fi
5629 elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
5630 echo -e "\nAttempting to find latest version using GitHub Api."
5631 version=$(echo "$output" | jq -r '.[1].tag_name')
5632 declare -g ${variable_name}="${version#v}"
5633 fi
5634 echo "${variable_name}=${!variable_name}"
5635 }
5636
5637 get_github_api_repo_url() {
5638 local url=$1
5639 echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5640 }
5641
###########################################
# Start docker-in-docker installation
###########################################

# Ensure apt is in non-interactive to avoid prompts
export DEBIAN_FRONTEND=noninteractive

# Source /etc/os-release to get OS info (ID, ID_LIKE, VERSION_CODENAME, ...)
. /etc/os-release

# Determine adjusted ID and package manager.
# NOTE: ID_LIKE may be a space-separated list (e.g. "ubuntu debian" on Linux
# Mint), so it is matched as a substring rather than requiring exact equality.
if [ "${ID}" = "debian" ] || [[ "${ID_LIKE}" = *"debian"* ]]; then
    ADJUSTED_ID="debian"
    PKG_MGR_CMD="apt-get"
    # Use dpkg for Debian-based systems
    architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
    ADJUSTED_ID="rhel"
    # Clear any inherited value so the emptiness check below reliably detects
    # that no supported package manager was found on this system.
    PKG_MGR_CMD=""
    # Determine the appropriate package manager for RHEL-based systems
    for pkg_mgr in tdnf dnf microdnf yum; do
        if command -v "$pkg_mgr" >/dev/null 2>&1; then
            PKG_MGR_CMD="$pkg_mgr"
            break
        fi
    done

    if [ -z "${PKG_MGR_CMD}" ]; then
        err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
        exit 1
    fi

    architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
else
    err "Linux distro ${ID} not supported."
    exit 1
fi
5678
# Azure Linux specific setup: its /etc/os-release carries no usable
# VERSION_CODENAME, so synthesize one from the version id (e.g. "azurelinux3.0").
if [ "${ID}" = "azurelinux" ]; then
    VERSION_CODENAME="azurelinux${VERSION_ID}"
fi

# Prevent attempting to install Moby on Debian trixie (packages removed)
if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
    err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
    err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
    exit 1
fi
5690
# Check if distro is supported for the selected engine (Moby vs Docker CE)
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        # Moby packages come from the Microsoft archive, which only carries
        # specific Debian/Ubuntu codenames.
        if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
            err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
            err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
            exit 1
        fi
        echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then
        if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
            echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
        else
            echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
        fi
    fi
else
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
            err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
            err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
            exit 1
        fi
        echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then
        # Docker CE publishes packages for RHEL-family distros directly
        echo "RHEL-based system (${ID}) detected - using Docker CE packages"
    fi
fi
5720
# Install base dependencies used throughout the rest of this script
base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
case ${ADJUSTED_ID} in
    debian)
        check_packages apt-transport-https $base_packages dirmngr
        ;;
    rhel)
        check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
        ;;
esac

# Install git if not already present
if ! command -v git >/dev/null 2>&1; then
    check_packages git
fi

# Update CA certificates to ensure HTTPS connections work properly
# This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
# Only run for Debian-based systems (RHEL uses update-ca-trust instead)
if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
    update-ca-certificates
fi

# Swap to legacy iptables for compatibility (Debian only); the nftables-backed
# iptables can break Docker networking inside containers
if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
    update-alternatives --set iptables /usr/sbin/iptables-legacy
    update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
5750
# Set up the necessary repositories for the chosen engine (Moby or Docker CE)
if [ "${USE_MOBY}" = "true" ]; then
    # Name of open source engine/cli
    engine_package_name="moby-engine"
    cli_package_name="moby-cli"

    case ${ADJUSTED_ID} in
        debian)
            # Import key safely and import Microsoft apt repo
            {
                curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
            } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
            echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
            ;;
        rhel)
            echo "(*) ${ID} detected - checking for Moby packages..."

            # Check if moby packages are available in default repos
            if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                echo "(*) Using built-in ${ID} Moby packages"
            else
                case "${ID}" in
                    azurelinux)
                        echo "(*) Moby packages not found in Azure Linux repositories"
                        echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                        err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                        exit 1
                        ;;
                    mariner)
                        echo "(*) Adding Microsoft repository for CBL-Mariner..."
                        # Add Microsoft repository if packages aren't available locally
                        curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                        cat > /etc/yum.repos.d/microsoft.repo << EOF
[microsoft]
name=Microsoft Repository
baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
EOF
                        # Verify packages are available after adding repo
                        pkg_mgr_update
                        if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                            echo "(*) Moby packages not found in Microsoft repository either"
                            err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                            exit 1
                        fi
                        ;;
                    *)
                        err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                        exit 1
                        ;;
                esac
            fi
            ;;
    esac
else
    # Name of licensed engine/cli
    engine_package_name="docker-ce"
    cli_package_name="docker-ce-cli"
    case ${ADJUSTED_ID} in
        debian)
            curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
            ;;
        rhel)
            # Docker CE repository setup for RHEL-based systems.
            # Helper: manually write the Docker CE yum repo definition.
            # Note: \$basearch is escaped so yum (not this script) expands it.
            setup_docker_ce_repo() {
                curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
            }
            # Helper: install extra libraries Docker CE needs on Azure Linux / Mariner.
            install_azure_linux_deps() {
                echo "(*) Installing device-mapper libraries for Docker CE..."
                [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                echo "(*) Installing additional Docker CE dependencies..."
                ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                    echo "(*) Some optional dependencies could not be installed, continuing..."
                }
            }
            # Helper: add a minimal SELinux file context for /var/lib/docker when
            # SELinux is enabled but container-selinux is unavailable.
            setup_selinux_context() {
                if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                    echo "(*) Creating minimal SELinux context for Docker compatibility..."
                    mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                    echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                fi
            }

            # Special handling for RHEL Docker CE installation
            case "${ID}" in
                azurelinux|mariner)
                    echo "(*) ${ID} detected"
                    echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                    echo "(*) Setting up Docker CE repository..."

                    setup_docker_ce_repo
                    install_azure_linux_deps

                    if [ "${USE_MOBY}" != "true" ]; then
                        echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                        echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                        setup_selinux_context
                    else
                        echo "(*) Using Moby - container-selinux not required"
                    fi
                    ;;
                *)
                    # Standard RHEL/CentOS/Fedora approach
                    if command -v dnf >/dev/null 2>&1; then
                        dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    elif command -v yum-config-manager >/dev/null 2>&1; then
                        yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    else
                        # Manual fallback
                        setup_docker_ce_repo
                    fi
                    ;;
            esac
            ;;
    esac
fi
5883
# Refresh the package database with the distro-appropriate tool.
if [ "${ADJUSTED_ID}" = "debian" ]; then
    apt-get update
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    pkg_mgr_update
fi
5893
# Soft version matching: resolve the requested DOCKER_VERSION into concrete
# package version suffixes for the engine and cli packages.
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            # A bare "=" means grep matched nothing - report what IS available
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
            if [ "${USE_MOBY}" = "true" ]; then
                available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            else
                available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            fi
            set -e
            if [ -n "${available_versions}" ]; then
                # rpm-style version pinning uses "-<version>" rather than apt's "=<version>"
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi
5938
# Version matching for moby-buildx (only relevant when installing Moby)
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Escape '.' and '+' so the requested version is matched literally
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
5976
# Install Docker / Moby CLI and engine if not already installed
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Pin the packages so later apt upgrades don't replace the pinned version
                apt-mark hold docker-ce docker-ce-cli
                # Install compose
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    set +e # Don't exit on error for this section
                    # FIX: pair the engine suffix with docker-ce and the cli suffix
                    # with docker-ce-cli (they were previously swapped).
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Repository baseurl for the detected rpm architecture
                        # (x86_64 / aarch64) instead of hardcoded x86_64
                        repo_baseurl="https://download.docker.com/linux/centos/9/${architecture}/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Derive plain version strings from the "-<version>" suffixes
                        if [ -n "${engine_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                            docker_cli_version="${cli_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.${architecture}.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o "docker-ce-[0-9][^\"]*\.el9\.${architecture}\.rpm" | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o "docker-ce-cli-[0-9][^\"]*\.el9\.${architecture}\.rpm" | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o "containerd\.io-[0-9][^\"]*\.el9\.${architecture}\.rpm" | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems.
                    # FIX: engine/cli version suffixes were previously swapped here too.
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
6107
6108 echo "Finished installing docker / moby!"
6109
6110 docker_home="/usr/libexec/docker"
6111 cli_plugins_dir="${docker_home}/cli-plugins"
6112
# Fallback for docker-compose: on download failure, resolve the release prior
# to the latest via get_previous_version and retry the download.
# Reads/updates the caller's globals: compose_version, target_compose_arch,
# docker_compose_path.
fallback_compose(){
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    get_previous_version "${url}" "${repo_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
}
6122
# If 'docker-compose' command is to be included
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the dpkg/rpm architecture onto compose's release artifact naming
    case "${architecture}" in
        amd64|x86_64) target_compose_arch=x86_64 ;;
        arm64|aarch64) target_compose_arch=aarch64 ;;
        *)
            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        # Compose v1 is end-of-life; installed only on explicit request
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        # Compose v2: resolve the requested version against compose's git tags
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
            echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
            fallback_compose "$docker_compose_url"
        }

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a CLI plugin so "docker compose" works
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6178
# fallback method for compose-switch: resolve the release prior to the latest
# and retry the download. Reads/updates the caller's globals:
# compose_switch_version, target_switch_arch.
fallback_compose-switch() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    get_previous_version "$url" "$repo_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
}
# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
    if type docker-compose > /dev/null 2>&1; then
        echo "(*) Installing compose-switch..."
        current_compose_path="$(command -v docker-compose)"
        target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
        compose_switch_version="latest"
        compose_switch_url="https://github.com/docker/compose-switch"

        # Map architecture for compose-switch downloads BEFORE any download can
        # happen: fallback_compose-switch builds its URL from target_switch_arch,
        # so it must be set before the version-resolution fallback below (it was
        # previously set after, leaving the fallback with an empty architecture).
        case "${architecture}" in
            amd64|x86_64) target_switch_arch=amd64 ;;
            arm64|aarch64) target_switch_arch=arm64 ;;
            *) target_switch_arch=${architecture} ;;
        esac

        # Try to get latest version, fallback to known stable version if GitHub API fails
        set +e
        find_version_from_git_tags compose_switch_version "$compose_switch_url"
        if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
            echo "(*) GitHub API rate limited or failed, using fallback method"
            fallback_compose-switch "$compose_switch_url"
        fi
        set -e

        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
        chmod +x /usr/local/bin/compose-switch
        # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
        # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
        mv "${current_compose_path}" "${target_compose_path}"
        # NOTE(review): docker_compose_path is only assigned when
        # DOCKER_DASH_COMPOSE_VERSION != "none" - verify this branch cannot be
        # reached with it unset.
        update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
        update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
    else
        err "Skipping installation of compose-switch as docker compose is unavailable..."
    fi
fi
6222
# If the init script already exists, a prior run configured this image - exit early
if [ -f "/usr/local/share/docker-init.sh" ]; then
    echo "/usr/local/share/docker-init.sh already exists, so exiting."
    # Clean up
    rm -rf /var/lib/apt/lists/*
    exit 0
fi
echo "docker-init doesn't exist, adding..."

# Create the docker group if it does not already exist
# (grep directly on /etc/group instead of the previous useless cat | grep)
if ! grep -e "^docker:" /etc/group > /dev/null 2>&1; then
    groupadd -r docker
fi

# Allow the target user to talk to the Docker daemon socket
usermod -aG docker ${USERNAME}
6237
# fallback for docker/buildx: resolve the release prior to the latest and
# re-download. Reads/updates the caller's globals: buildx_version,
# target_buildx_arch, buildx_file_name.
fallback_buildx() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
    get_previous_version "$url" "$repo_url" buildx_version
    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
    echo -e "\nAttempting to install v${buildx_version}"
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
}
6248
# Install the buildx CLI plugin when requested
if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
    buildx_version="latest"
    docker_buildx_url="https://github.com/docker/buildx"
    find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
    echo "(*) Installing buildx ${buildx_version}..."

    # Map architecture for buildx downloads
    case "${architecture}" in
        amd64|x86_64) target_buildx_arch=amd64 ;;
        arm64|aarch64) target_buildx_arch=arm64 ;;
        *) target_buildx_arch=${architecture} ;;
    esac

    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

    cd /tmp
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

    docker_home="/usr/libexec/docker"
    cli_plugins_dir="${docker_home}/cli-plugins"

    mkdir -p ${cli_plugins_dir}
    mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
    chmod +x ${cli_plugins_dir}/docker-buildx

    # Give the docker group read/write access; setgid on directories keeps
    # group ownership on files created later
    chown -R "${USERNAME}:docker" "${docker_home}"
    chmod -R g+r+w "${docker_home}"
    find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
fi
6278
DOCKER_DEFAULT_IP6_TABLES=""
if [ "$DISABLE_IP6_TABLES" == true ]; then
    requested_version=""
    # checking whether the version requested either is in semver format or just a number denoting the major version
    # and, extracting the major version number out of the two scenarios
    semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
    if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
        requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
    elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
        requested_version=$DOCKER_VERSION
    fi
    # Only pass --ip6tables=false for "latest" or major version >= 27
    if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
        DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
        echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
    fi
fi

# Ensure the target directory for the generated init script exists
if [ ! -d /usr/local/share ]; then
    mkdir -p /usr/local/share
fi
6299
6300 tee /usr/local/share/docker-init.sh > /dev/null \
6301 << EOF
6302 #!/bin/sh
6303 #-------------------------------------------------------------------------------------------------------------
6304 # Copyright (c) Microsoft Corporation. All rights reserved.
6305 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6306 #-------------------------------------------------------------------------------------------------------------
6307
6308 set -e
6309
6310 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
6311 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
6312 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
6313 EOF
6314
6315 tee -a /usr/local/share/docker-init.sh > /dev/null \
6316 << 'EOF'
6317 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6318 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6319 find /run /var/run -iname 'docker*.pid' -delete || :
6320 find /run /var/run -iname 'container*.pid' -delete || :
6321
6322 # -- Start: dind wrapper script --
6323 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6324
6325 export container=docker
6326
6327 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6328 mount -t securityfs none /sys/kernel/security || {
6329 echo >&2 'Could not mount /sys/kernel/security.'
6330 echo >&2 'AppArmor detection and --privileged mode might break.'
6331 }
6332 fi
6333
6334 # Mount /tmp (conditionally)
6335 if ! mountpoint -q /tmp; then
6336 mount -t tmpfs none /tmp
6337 fi
6338
6339 set_cgroup_nesting()
6340 {
6341 # cgroup v2: enable nesting
6342 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6343 # move the processes from the root group to the /init group,
6344 # otherwise writing subtree_control fails with EBUSY.
6345 # An error during moving non-existent process (i.e., "cat") is ignored.
6346 mkdir -p /sys/fs/cgroup/init
6347 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6348 # enable controllers
6349 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6350 > /sys/fs/cgroup/cgroup.subtree_control
6351 fi
6352 }
6353
6354 # Set cgroup nesting, retrying if necessary
6355 retry_cgroup_nesting=0
6356
6357 until [ "${retry_cgroup_nesting}" -eq "5" ];
6358 do
6359 set +e
6360 set_cgroup_nesting
6361
6362 if [ $? -ne 0 ]; then
6363 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6364 else
6365 break
6366 fi
6367
6368 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6369 set -e
6370 done
6371
6372 # -- End: dind wrapper script --
6373
6374 # Handle DNS
6375 set +e
6376 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6377 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6378 then
6379 echo "Setting dockerd Azure DNS."
6380 CUSTOMDNS="--dns 168.63.129.16"
6381 else
6382 echo "Not setting dockerd DNS manually."
6383 CUSTOMDNS=""
6384 fi
6385 set -e
6386
6387 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6388 then
6389 DEFAULT_ADDRESS_POOL=""
6390 else
6391 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6392 fi
6393
6394 # Start docker/moby engine
6395 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6396 INNEREOF
6397 )"
6398
6399 sudo_if() {
6400 COMMAND="$*"
6401
6402 if [ "$(id -u)" -ne 0 ]; then
6403 sudo $COMMAND
6404 else
6405 $COMMAND
6406 fi
6407 }
6408
6409 retry_docker_start_count=0
6410 docker_ok="false"
6411
6412 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6413 do
6414 # Start using sudo if not invoked as root
6415 if [ "$(id -u)" -ne 0 ]; then
6416 sudo /bin/sh -c "${dockerd_start}"
6417 else
6418 eval "${dockerd_start}"
6419 fi
6420
6421 retry_count=0
6422 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6423 do
6424 sleep 1s
6425 set +e
6426 docker info > /dev/null 2>&1 && docker_ok="true"
6427 set -e
6428
6429 retry_count=`expr $retry_count + 1`
6430 done
6431
6432 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6433 echo "(*) Failed to start docker, retrying..."
6434 set +e
6435 sudo_if pkill dockerd
6436 sudo_if pkill containerd
6437 set -e
6438 fi
6439
6440 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6441 done
6442
6443 # Execute whatever commands were passed in (if any). This allows us
6444 # to set this script to ENTRYPOINT while still executing the default CMD.
6445 exec "$@"
6446 EOF
6447
6448 chmod +x /usr/local/share/docker-init.sh
6449 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6450
6451 # Clean up
6452 rm -rf /var/lib/apt/lists/*
6453
6454 echo 'docker-in-docker-debian script has completed!'"#),
6455 ]).await;
6456
6457 return Ok(http::Response::builder()
6458 .status(200)
6459 .body(AsyncBody::from(response))
6460 .unwrap());
6461 }
6462 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6463 let response = r#"
6464 {
6465 "schemaVersion": 2,
6466 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6467 "config": {
6468 "mediaType": "application/vnd.devcontainers",
6469 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6470 "size": 2
6471 },
6472 "layers": [
6473 {
6474 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6475 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6476 "size": 20992,
6477 "annotations": {
6478 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6479 }
6480 }
6481 ],
6482 "annotations": {
6483 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6484 "com.github.package.type": "devcontainer_feature"
6485 }
6486 }
6487 "#;
6488
6489 return Ok(http::Response::builder()
6490 .status(200)
6491 .body(http_client::AsyncBody::from(response))
6492 .unwrap());
6493 }
6494 if parts.uri.path()
6495 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6496 {
6497 let response = build_tarball(vec![
6498 ("./devcontainer-feature.json", r#"
6499 {
6500 "id": "go",
6501 "version": "1.3.3",
6502 "name": "Go",
6503 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6504 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6505 "options": {
6506 "version": {
6507 "type": "string",
6508 "proposals": [
6509 "latest",
6510 "none",
6511 "1.24",
6512 "1.23"
6513 ],
6514 "default": "latest",
6515 "description": "Select or enter a Go version to install"
6516 },
6517 "golangciLintVersion": {
6518 "type": "string",
6519 "default": "latest",
6520 "description": "Version of golangci-lint to install"
6521 }
6522 },
6523 "init": true,
6524 "customizations": {
6525 "vscode": {
6526 "extensions": [
6527 "golang.Go"
6528 ],
6529 "settings": {
6530 "github.copilot.chat.codeGeneration.instructions": [
6531 {
6532 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6533 }
6534 ]
6535 }
6536 }
6537 },
6538 "containerEnv": {
6539 "GOROOT": "/usr/local/go",
6540 "GOPATH": "/go",
6541 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6542 },
6543 "capAdd": [
6544 "SYS_PTRACE"
6545 ],
6546 "securityOpt": [
6547 "seccomp=unconfined"
6548 ],
6549 "installsAfter": [
6550 "ghcr.io/devcontainers/features/common-utils"
6551 ]
6552 }
6553 "#),
6554 ("./install.sh", r#"
6555 #!/usr/bin/env bash
6556 #-------------------------------------------------------------------------------------------------------------
6557 # Copyright (c) Microsoft Corporation. All rights reserved.
6558 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6559 #-------------------------------------------------------------------------------------------------------------
6560 #
6561 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6562 # Maintainer: The VS Code and Codespaces Teams
6563
6564 TARGET_GO_VERSION="${VERSION:-"latest"}"
6565 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6566
6567 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6568 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6569 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6570 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6571
6572 # https://www.google.com/linuxrepositories/
6573 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6574
6575 set -e
6576
6577 if [ "$(id -u)" -ne 0 ]; then
6578 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6579 exit 1
6580 fi
6581
6582 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6583 . /etc/os-release
6584 # Get an adjusted ID independent of distro variants
6585 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6586 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6587 ADJUSTED_ID="debian"
6588 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6589 ADJUSTED_ID="rhel"
6590 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6591 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6592 else
6593 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6594 fi
6595 else
6596 echo "Linux distro ${ID} not supported."
6597 exit 1
6598 fi
6599
6600 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6601 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6602 # Update the repo files to reference vault.centos.org.
6603 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6604 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6605 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6606 fi
6607
6608 # Setup INSTALL_CMD & PKG_MGR_CMD
6609 if type apt-get > /dev/null 2>&1; then
6610 PKG_MGR_CMD=apt-get
6611 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6612 elif type microdnf > /dev/null 2>&1; then
6613 PKG_MGR_CMD=microdnf
6614 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6615 elif type dnf > /dev/null 2>&1; then
6616 PKG_MGR_CMD=dnf
6617 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6618 else
6619 PKG_MGR_CMD=yum
6620 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6621 fi
6622
6623 # Clean up
6624 clean_up() {
6625 case ${ADJUSTED_ID} in
6626 debian)
6627 rm -rf /var/lib/apt/lists/*
6628 ;;
6629 rhel)
6630 rm -rf /var/cache/dnf/* /var/cache/yum/*
6631 rm -rf /tmp/yum.log
6632 rm -rf ${GPG_INSTALL_PATH}
6633 ;;
6634 esac
6635 }
6636 clean_up
6637
6638
6639 # Figure out correct version of a three part version number is not passed
6640 find_version_from_git_tags() {
6641 local variable_name=$1
6642 local requested_version=${!variable_name}
6643 if [ "${requested_version}" = "none" ]; then return; fi
6644 local repository=$2
6645 local prefix=${3:-"tags/v"}
6646 local separator=${4:-"."}
6647 local last_part_optional=${5:-"false"}
6648 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6649 local escaped_separator=${separator//./\\.}
6650 local last_part
6651 if [ "${last_part_optional}" = "true" ]; then
6652 last_part="(${escaped_separator}[0-9]+)?"
6653 else
6654 last_part="${escaped_separator}[0-9]+"
6655 fi
6656 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6657 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6658 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6659 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6660 else
6661 set +e
6662 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6663 set -e
6664 fi
6665 fi
6666 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6667 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6668 exit 1
6669 fi
6670 echo "${variable_name}=${!variable_name}"
6671 }
6672
6673 pkg_mgr_update() {
6674 case $ADJUSTED_ID in
6675 debian)
6676 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6677 echo "Running apt-get update..."
6678 ${PKG_MGR_CMD} update -y
6679 fi
6680 ;;
6681 rhel)
6682 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6683 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6684 echo "Running ${PKG_MGR_CMD} makecache ..."
6685 ${PKG_MGR_CMD} makecache
6686 fi
6687 else
6688 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6689 echo "Running ${PKG_MGR_CMD} check-update ..."
6690 set +e
6691 ${PKG_MGR_CMD} check-update
6692 rc=$?
6693 if [ $rc != 0 ] && [ $rc != 100 ]; then
6694 exit 1
6695 fi
6696 set -e
6697 fi
6698 fi
6699 ;;
6700 esac
6701 }
6702
6703 # Checks if packages are installed and installs them if not
6704 check_packages() {
6705 case ${ADJUSTED_ID} in
6706 debian)
6707 if ! dpkg -s "$@" > /dev/null 2>&1; then
6708 pkg_mgr_update
6709 ${INSTALL_CMD} "$@"
6710 fi
6711 ;;
6712 rhel)
6713 if ! rpm -q "$@" > /dev/null 2>&1; then
6714 pkg_mgr_update
6715 ${INSTALL_CMD} "$@"
6716 fi
6717 ;;
6718 esac
6719 }
6720
6721 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6722 rm -f /etc/profile.d/00-restore-env.sh
6723 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6724 chmod +x /etc/profile.d/00-restore-env.sh
6725
6726 # Some distributions do not install awk by default (e.g. Mariner)
6727 if ! type awk >/dev/null 2>&1; then
6728 check_packages awk
6729 fi
6730
6731 # Determine the appropriate non-root user
6732 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6733 USERNAME=""
6734 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6735 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6736 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6737 USERNAME=${CURRENT_USER}
6738 break
6739 fi
6740 done
6741 if [ "${USERNAME}" = "" ]; then
6742 USERNAME=root
6743 fi
6744 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6745 USERNAME=root
6746 fi
6747
6748 export DEBIAN_FRONTEND=noninteractive
6749
6750 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6751
6752 if [ $ADJUSTED_ID = "debian" ]; then
6753 check_packages g++ libc6-dev
6754 else
6755 check_packages gcc-c++ glibc-devel
6756 fi
6757 # Install curl, git, other dependencies if missing
6758 if ! type curl > /dev/null 2>&1; then
6759 check_packages curl
6760 fi
6761 if ! type git > /dev/null 2>&1; then
6762 check_packages git
6763 fi
6764 # Some systems, e.g. Mariner, still a few more packages
6765 if ! type as > /dev/null 2>&1; then
6766 check_packages binutils
6767 fi
6768 if ! [ -f /usr/include/linux/errno.h ]; then
6769 check_packages kernel-headers
6770 fi
6771 # Minimal RHEL install may need findutils installed
6772 if ! [ -f /usr/bin/find ]; then
6773 check_packages findutils
6774 fi
6775
6776 # Get closest match for version number specified
6777 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6778
6779 architecture="$(uname -m)"
6780 case $architecture in
6781 x86_64) architecture="amd64";;
6782 aarch64 | armv8*) architecture="arm64";;
6783 aarch32 | armv7* | armvhf*) architecture="armv6l";;
6784 i?86) architecture="386";;
6785 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6786 esac
6787
6788 # Install Go
6789 umask 0002
6790 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6791 groupadd -r golang
6792 fi
6793 usermod -a -G golang "${USERNAME}"
6794 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6795
6796 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6797 # Use a temporary location for gpg keys to avoid polluting image
6798 export GNUPGHOME="/tmp/tmp-gnupg"
6799 mkdir -p ${GNUPGHOME}
6800 chmod 700 ${GNUPGHOME}
6801 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6802 gpg -q --import /tmp/tmp-gnupg/golang_key
6803 echo "Downloading Go ${TARGET_GO_VERSION}..."
6804 set +e
6805 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6806 exit_code=$?
6807 set -e
6808 if [ "$exit_code" != "0" ]; then
6809 echo "(!) Download failed."
6810 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6811 set +e
6812 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6813 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6814 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6815 # Handle Go's odd version pattern where "0" releases omit the last part
6816 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6817 ((minor=minor-1))
6818 TARGET_GO_VERSION="${major}.${minor}"
6819 # Look for latest version from previous minor release
6820 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6821 else
6822 ((breakfix=breakfix-1))
6823 if [ "${breakfix}" = "0" ]; then
6824 TARGET_GO_VERSION="${major}.${minor}"
6825 else
6826 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6827 fi
6828 fi
6829 set -e
6830 echo "Trying ${TARGET_GO_VERSION}..."
6831 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6832 fi
6833 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6834 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6835 echo "Extracting Go ${TARGET_GO_VERSION}..."
6836 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6837 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6838 else
6839 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6840 fi
6841
6842 # Install Go tools that are isImportant && !replacedByGopls based on
6843 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6844 GO_TOOLS="\
6845 golang.org/x/tools/gopls@latest \
6846 honnef.co/go/tools/cmd/staticcheck@latest \
6847 golang.org/x/lint/golint@latest \
6848 github.com/mgechev/revive@latest \
6849 github.com/go-delve/delve/cmd/dlv@latest \
6850 github.com/fatih/gomodifytags@latest \
6851 github.com/haya14busa/goplay/cmd/goplay@latest \
6852 github.com/cweill/gotests/gotests@latest \
6853 github.com/josharian/impl@latest"
6854
6855 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6856 echo "Installing common Go tools..."
6857 export PATH=${TARGET_GOROOT}/bin:${PATH}
6858 export GOPATH=/tmp/gotools
6859 export GOCACHE="${GOPATH}/cache"
6860
6861 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6862 cd "${GOPATH}"
6863
6864 # Use go get for versions of go under 1.16
6865 go_install_command=install
6866 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6867 export GO111MODULE=on
6868 go_install_command=get
6869 echo "Go version < 1.16, using go get."
6870 fi
6871
6872 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6873
6874 # Move Go tools into path
6875 if [ -d "${GOPATH}/bin" ]; then
6876 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6877 fi
6878
6879 # Install golangci-lint from precompiled binaries
6880 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6881 echo "Installing golangci-lint latest..."
6882 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6883 sh -s -- -b "${TARGET_GOPATH}/bin"
6884 else
6885 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6886 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6887 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6888 fi
6889
6890 # Remove Go tools temp directory
6891 rm -rf "${GOPATH}"
6892 fi
6893
6894
6895 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6896 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6897 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6898 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6899
6900 # Clean up
6901 clean_up
6902
6903 echo "Done!"
6904 "#),
6905 ])
6906 .await;
6907 return Ok(http::Response::builder()
6908 .status(200)
6909 .body(AsyncBody::from(response))
6910 .unwrap());
6911 }
6912 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6913 let response = r#"
6914 {
6915 "schemaVersion": 2,
6916 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6917 "config": {
6918 "mediaType": "application/vnd.devcontainers",
6919 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6920 "size": 2
6921 },
6922 "layers": [
6923 {
6924 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6925 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6926 "size": 19968,
6927 "annotations": {
6928 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6929 }
6930 }
6931 ],
6932 "annotations": {
6933 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6934 "com.github.package.type": "devcontainer_feature"
6935 }
6936 }"#;
6937 return Ok(http::Response::builder()
6938 .status(200)
6939 .body(AsyncBody::from(response))
6940 .unwrap());
6941 }
6942 if parts.uri.path()
6943 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6944 {
6945 let response = build_tarball(vec![
6946 (
6947 "./devcontainer-feature.json",
6948 r#"
6949{
6950 "id": "aws-cli",
6951 "version": "1.1.3",
6952 "name": "AWS CLI",
6953 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6954 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6955 "options": {
6956 "version": {
6957 "type": "string",
6958 "proposals": [
6959 "latest"
6960 ],
6961 "default": "latest",
6962 "description": "Select or enter an AWS CLI version."
6963 },
6964 "verbose": {
6965 "type": "boolean",
6966 "default": true,
6967 "description": "Suppress verbose output."
6968 }
6969 },
6970 "customizations": {
6971 "vscode": {
6972 "extensions": [
6973 "AmazonWebServices.aws-toolkit-vscode"
6974 ],
6975 "settings": {
6976 "github.copilot.chat.codeGeneration.instructions": [
6977 {
6978 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6979 }
6980 ]
6981 }
6982 }
6983 },
6984 "installsAfter": [
6985 "ghcr.io/devcontainers/features/common-utils"
6986 ]
6987}
6988 "#,
6989 ),
6990 (
6991 "./install.sh",
6992 r#"#!/usr/bin/env bash
6993 #-------------------------------------------------------------------------------------------------------------
6994 # Copyright (c) Microsoft Corporation. All rights reserved.
6995 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6996 #-------------------------------------------------------------------------------------------------------------
6997 #
6998 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6999 # Maintainer: The VS Code and Codespaces Teams
7000
7001 set -e
7002
7003 # Clean up
7004 rm -rf /var/lib/apt/lists/*
7005
7006 VERSION=${VERSION:-"latest"}
7007 VERBOSE=${VERBOSE:-"true"}
7008
7009 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7010 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7011
7012 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7013 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7014 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7015 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7016 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7017 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7018 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7019 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7020 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7021 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7022 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7023 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7024 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7025 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7026 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7027 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7028 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7029 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7030 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7031 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7032 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7033 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7034 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7035 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7036 YLZATHZKTJyiqA==
7037 =vYOk
7038 -----END PGP PUBLIC KEY BLOCK-----"
7039
7040 if [ "$(id -u)" -ne 0 ]; then
7041 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7042 exit 1
7043 fi
7044
7045 apt_get_update()
7046 {
7047 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7048 echo "Running apt-get update..."
7049 apt-get update -y
7050 fi
7051 }
7052
7053 # Checks if packages are installed and installs them if not
7054 check_packages() {
7055 if ! dpkg -s "$@" > /dev/null 2>&1; then
7056 apt_get_update
7057 apt-get -y install --no-install-recommends "$@"
7058 fi
7059 }
7060
7061 export DEBIAN_FRONTEND=noninteractive
7062
7063 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7064
7065 verify_aws_cli_gpg_signature() {
7066 local filePath=$1
7067 local sigFilePath=$2
7068 local awsGpgKeyring=aws-cli-public-key.gpg
7069
7070 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7071 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7072 local status=$?
7073
7074 rm "./${awsGpgKeyring}"
7075
7076 return ${status}
7077 }
7078
7079 install() {
7080 local scriptZipFile=awscli.zip
7081 local scriptSigFile=awscli.sig
7082
7083 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7084 if [ "${VERSION}" != "latest" ]; then
7085 local versionStr=-${VERSION}
7086 fi
7087 architecture=$(dpkg --print-architecture)
7088 case "${architecture}" in
7089 amd64) architectureStr=x86_64 ;;
7090 arm64) architectureStr=aarch64 ;;
7091 *)
7092 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7093 exit 1
7094 esac
7095 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7096 curl "${scriptUrl}" -o "${scriptZipFile}"
7097 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7098
7099 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7100 if (( $? > 0 )); then
7101 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7102 exit 1
7103 fi
7104
7105 if [ "${VERBOSE}" = "false" ]; then
7106 unzip -q "${scriptZipFile}"
7107 else
7108 unzip "${scriptZipFile}"
7109 fi
7110
7111 ./aws/install
7112
7113 # kubectl bash completion
7114 mkdir -p /etc/bash_completion.d
7115 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7116
7117 # kubectl zsh completion
7118 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7119 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7120 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7121 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7122 fi
7123
7124 rm -rf ./aws
7125 }
7126
7127 echo "(*) Installing AWS CLI..."
7128
7129 install
7130
7131 # Clean up
7132 rm -rf /var/lib/apt/lists/*
7133
7134 echo "Done!""#,
7135 ),
7136 ("./scripts/", r#""#),
7137 (
7138 "./scripts/fetch-latest-completer-scripts.sh",
7139 r#"
7140 #!/bin/bash
7141 #-------------------------------------------------------------------------------------------------------------
7142 # Copyright (c) Microsoft Corporation. All rights reserved.
7143 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7144 #-------------------------------------------------------------------------------------------------------------
7145 #
7146 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7147 # Maintainer: The Dev Container spec maintainers
7148 #
7149 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7150 #
7151 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7152 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7153 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7154
7155 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7156 chmod +x "$BASH_COMPLETER_SCRIPT"
7157
7158 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7159 chmod +x "$ZSH_COMPLETER_SCRIPT"
7160 "#,
7161 ),
7162 ("./scripts/vendor/", r#""#),
7163 (
7164 "./scripts/vendor/aws_bash_completer",
7165 r#"
7166 # Typically that would be added under one of the following paths:
7167 # - /etc/bash_completion.d
7168 # - /usr/local/etc/bash_completion.d
7169 # - /usr/share/bash-completion/completions
7170
7171 complete -C aws_completer aws
7172 "#,
7173 ),
7174 (
7175 "./scripts/vendor/aws_zsh_completer.sh",
7176 r#"
7177 # Source this file to activate auto completion for zsh using the bash
7178 # compatibility helper. Make sure to run `compinit` before, which should be
7179 # given usually.
7180 #
7181 # % source /path/to/zsh_complete.sh
7182 #
7183 # Typically that would be called somewhere in your .zshrc.
7184 #
7185 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7186 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7187 #
7188 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7189 #
7190 # zsh releases prior to that version do not export the required env variables!
7191
7192 autoload -Uz bashcompinit
7193 bashcompinit -i
7194
7195 _bash_complete() {
7196 local ret=1
7197 local -a suf matches
7198 local -x COMP_POINT COMP_CWORD
7199 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7200 local -x COMP_LINE="$words"
7201 local -A savejobstates savejobtexts
7202
7203 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7204 (( COMP_CWORD = CURRENT - 1))
7205 COMP_WORDS=( $words )
7206 BASH_VERSINFO=( 2 05b 0 1 release )
7207
7208 savejobstates=( ${(kv)jobstates} )
7209 savejobtexts=( ${(kv)jobtexts} )
7210
7211 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7212
7213 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7214
7215 if [[ -n $matches ]]; then
7216 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7217 compset -P '*/' && matches=( ${matches##*/} )
7218 compset -S '/*' && matches=( ${matches%%/*} )
7219 compadd -Q -f "${suf[@]}" -a matches && ret=0
7220 else
7221 compadd -Q "${suf[@]}" -a matches && ret=0
7222 fi
7223 fi
7224
7225 if (( ret )); then
7226 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7227 _default "${suf[@]}" && ret=0
7228 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7229 _directories "${suf[@]}" && ret=0
7230 fi
7231 fi
7232
7233 return ret
7234 }
7235
7236 complete -C aws_completer aws
7237 "#,
7238 ),
7239 ]).await;
7240
7241 return Ok(http::Response::builder()
7242 .status(200)
7243 .body(AsyncBody::from(response))
7244 .unwrap());
7245 }
7246
7247 Ok(http::Response::builder()
7248 .status(404)
7249 .body(http_client::AsyncBody::default())
7250 .unwrap())
7251 })
7252 }
7253}