1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use regex::Regex;
10
11use fs::Fs;
12use http_client::HttpClient;
13use util::{ResultExt, command::Command, normalize_path};
14
15use crate::{
16 DevContainerConfig, DevContainerContext,
17 command_json::{CommandRunner, DefaultCommandRunner},
18 devcontainer_api::{DevContainerError, DevContainerUp},
19 devcontainer_json::{
20 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
21 deserialize_devcontainer_json,
22 },
23 docker::{
24 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
25 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
26 },
27 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
28 get_oci_token,
29 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
30 safe_id_lower,
31};
32
/// Tracks how far the devcontainer configuration has progressed through parsing.
enum ConfigStatus {
    /// Raw JSON deserialized, but `${...}` substitution variables not yet expanded.
    Deserialized(DevContainer),
    /// Substitution variables (devcontainerId, workspace folders, localEnv, ...)
    /// have been expanded and the result re-parsed.
    VariableParsed(DevContainer),
}
37
/// A resolved docker-compose setup: the compose file paths handed to the docker
/// CLI, plus the merged configuration loaded from them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    /// Absolute paths of all compose files, including generated override files
    /// appended during the build.
    files: Vec<PathBuf>,
    /// Merged compose configuration obtained via the docker client.
    config: DockerComposeConfig,
}
43
/// Orchestrates a single dev container project: parsing the configuration,
/// downloading feature resources, generating build files, building images,
/// and starting the container.
struct DevContainerManifest {
    /// HTTP client used for OCI registry requests (tokens, manifests, tarballs).
    http_client: Arc<dyn HttpClient>,
    /// Filesystem abstraction for all reads/writes (config, temp build files).
    fs: Arc<dyn Fs>,
    /// Docker operations (inspect, compose config/build, ...).
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    /// Unmodified devcontainer JSON text, kept for variable re-expansion.
    raw_config: String,
    /// Parsed configuration plus its parse-progress state.
    config: ConfigStatus,
    /// Host environment used to expand `${localEnv:...}` variables.
    local_environment: HashMap<String, String>,
    /// Root of the project on the host machine.
    local_project_directory: PathBuf,
    /// Directory containing the devcontainer config file.
    config_directory: PathBuf,
    /// File name of the devcontainer config inside `config_directory`.
    file_name: String,
    /// Inspect data for the resolved base image; populated during resource download.
    root_image: Option<DockerInspect>,
    /// Paths/tags for the generated features build; populated during resource download.
    features_build_info: Option<FeaturesBuildInfo>,
    /// Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
/// Default parent directory for project workspaces inside the container.
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
60impl DevContainerManifest {
61 async fn new(
62 context: &DevContainerContext,
63 environment: HashMap<String, String>,
64 docker_client: Arc<dyn DockerClient>,
65 command_runner: Arc<dyn CommandRunner>,
66 local_config: DevContainerConfig,
67 local_project_path: &Path,
68 ) -> Result<Self, DevContainerError> {
69 let config_path = local_project_path.join(local_config.config_path.clone());
70 log::debug!("parsing devcontainer json found in {:?}", &config_path);
71 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
72 log::error!("Unable to read devcontainer contents: {e}");
73 DevContainerError::DevContainerParseFailed
74 })?;
75
76 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
77
78 let devcontainer_directory = config_path.parent().ok_or_else(|| {
79 log::error!("Dev container file should be in a directory");
80 DevContainerError::NotInValidProject
81 })?;
82 let file_name = config_path
83 .file_name()
84 .and_then(|f| f.to_str())
85 .ok_or_else(|| {
86 log::error!("Dev container file has no file name, or is invalid unicode");
87 DevContainerError::DevContainerParseFailed
88 })?;
89
90 Ok(Self {
91 fs: context.fs.clone(),
92 http_client: context.http_client.clone(),
93 docker_client,
94 command_runner,
95 raw_config: devcontainer_contents,
96 config: ConfigStatus::Deserialized(devcontainer),
97 local_project_directory: local_project_path.to_path_buf(),
98 local_environment: environment,
99 config_directory: devcontainer_directory.to_path_buf(),
100 file_name: file_name.to_string(),
101 root_image: None,
102 features_build_info: None,
103 features: Vec::new(),
104 })
105 }
106
107 fn devcontainer_id(&self) -> String {
108 let mut labels = self.identifying_labels();
109 labels.sort_by_key(|(key, _)| *key);
110
111 let mut hasher = DefaultHasher::new();
112 for (key, value) in &labels {
113 key.hash(&mut hasher);
114 value.hash(&mut hasher);
115 }
116
117 format!("{:016x}", hasher.finish())
118 }
119
120 fn identifying_labels(&self) -> Vec<(&str, String)> {
121 let labels = vec![
122 (
123 "devcontainer.local_folder",
124 (self.local_project_directory.display()).to_string(),
125 ),
126 (
127 "devcontainer.config_file",
128 (self.config_file().display()).to_string(),
129 ),
130 ];
131 labels
132 }
133
    /// Expands the substitution variables that can be resolved without a running
    /// container: `${devcontainerId}`, the workspace-folder variables, and
    /// `${localEnv:VAR}` for every variable in the captured local environment.
    ///
    /// Backslashes in folder paths and env values are normalized to forward
    /// slashes — presumably to keep Windows host paths usable inside the
    /// container; confirm against callers before changing.
    fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
        let mut replaced_content = content
            .replace("${devcontainerId}", &self.devcontainer_id())
            // Unknown remote folder yields an empty substitution rather than an error.
            .replace(
                "${containerWorkspaceFolderBasename}",
                &self.remote_workspace_base_name().unwrap_or_default(),
            )
            .replace(
                "${localWorkspaceFolderBasename}",
                &self.local_workspace_base_name()?,
            )
            .replace(
                "${containerWorkspaceFolder}",
                &self
                    .remote_workspace_folder()
                    .map(|path| path.display().to_string())
                    .unwrap_or_default()
                    .replace('\\', "/"),
            )
            .replace(
                "${localWorkspaceFolder}",
                &self.local_workspace_folder().replace('\\', "/"),
            );
        // Substitute each captured local environment variable individually.
        for (k, v) in &self.local_environment {
            let find = format!("${{localEnv:{k}}}");
            replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
        }

        Ok(replaced_content)
    }
164
165 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
166 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
167 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
168
169 self.config = ConfigStatus::VariableParsed(parsed_config);
170
171 Ok(())
172 }
173
    /// Computes the environment for remote processes: the running container's
    /// own env (minus HOME) overlaid with the devcontainer `remoteEnv` entries,
    /// with `${containerEnv:VAR}` references substituted from the container env.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            // Substitution is performed textually on the serialized JSON so it
            // applies uniformly to every entry in one pass.
            // NOTE(review): container-env values containing JSON-special
            // characters (quotes, backslashes) are injected unescaped here and
            // could make the re-parse below fail — confirm such values cannot
            // occur, or substitute per-entry instead.
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // remoteEnv entries win over the container's own environment.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
206
207 fn config_file(&self) -> PathBuf {
208 self.config_directory.join(&self.file_name)
209 }
210
211 fn dev_container(&self) -> &DevContainer {
212 match &self.config {
213 ConfigStatus::Deserialized(dev_container) => dev_container,
214 ConfigStatus::VariableParsed(dev_container) => dev_container,
215 }
216 }
217
218 async fn dockerfile_location(&self) -> Option<PathBuf> {
219 let dev_container = self.dev_container();
220 match dev_container.build_type() {
221 DevContainerBuildType::Image(_) => None,
222 DevContainerBuildType::Dockerfile(build) => {
223 Some(self.config_directory.join(&build.dockerfile))
224 }
225 DevContainerBuildType::DockerCompose => {
226 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
227 return None;
228 };
229 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
230 else {
231 return None;
232 };
233 main_service.build.and_then(|b| {
234 let compose_file = docker_compose_manifest.files.first()?;
235 resolve_compose_dockerfile(
236 compose_file,
237 b.context.as_deref(),
238 b.dockerfile.as_deref()?,
239 )
240 })
241 }
242 DevContainerBuildType::None => None,
243 }
244 }
245
    /// Generates a docker tag for the features-extended image: a short prefix
    /// from the devcontainer name (or "zed-dc"), a hash of the extended
    /// Dockerfile's build path, and a "-features" suffix.
    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
        let mut hasher = DefaultHasher::new();
        let prefix = match &self.dev_container().name {
            Some(name) => &safe_id_lower(name),
            None => "zed-dc",
        };
        // Truncate to at most 6 bytes; `get` returns None (keeping the full
        // prefix) if 6 is out of range or not a char boundary.
        let prefix = prefix.get(..6).unwrap_or(prefix);

        // Only the Dockerfile path feeds the hash, so the same path yields the
        // same tag across runs of the same process.
        dockerfile_build_path.hash(&mut hasher);

        let hash = hasher.finish();
        format!("{}-{:x}-features", prefix, hash)
    }
259
260 /// Gets the base image from the devcontainer with the following precedence:
261 /// - The devcontainer image if an image is specified
262 /// - The image sourced in the Dockerfile if a Dockerfile is specified
263 /// - The image sourced in the docker-compose main service, if one is specified
264 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
265 /// If no such image is available, return an error
266 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
267 match self.dev_container().build_type() {
268 DevContainerBuildType::Image(image) => {
269 return Ok(image);
270 }
271 DevContainerBuildType::Dockerfile(build) => {
272 let dockerfile_contents = self.expanded_dockerfile_content().await?;
273 return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
274 || {
275 log::error!("Unable to find base image in Dockerfile");
276 DevContainerError::DevContainerParseFailed
277 },
278 );
279 }
280 DevContainerBuildType::DockerCompose => {
281 let docker_compose_manifest = self.docker_compose_manifest().await?;
282 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
283
284 if let Some(_) = main_service
285 .build
286 .as_ref()
287 .and_then(|b| b.dockerfile.as_ref())
288 {
289 let dockerfile_contents = self.expanded_dockerfile_content().await?;
290 return image_from_dockerfile(
291 dockerfile_contents,
292 &main_service.build.as_ref().and_then(|b| b.target.clone()),
293 )
294 .ok_or_else(|| {
295 log::error!("Unable to find base image in Dockerfile");
296 DevContainerError::DevContainerParseFailed
297 });
298 }
299 if let Some(image) = &main_service.image {
300 return Ok(image.to_string());
301 }
302
303 log::error!("No valid base image found in docker-compose configuration");
304 return Err(DevContainerError::DevContainerParseFailed);
305 }
306 DevContainerBuildType::None => {
307 log::error!("Not a valid devcontainer config for build");
308 return Err(DevContainerError::NotInValidProject);
309 }
310 }
311 }
312
    /// Downloads each configured feature's OCI layer and prepares the extended
    /// Dockerfile that installs them on top of the resolved base image.
    ///
    /// On success populates `self.root_image`, `self.features_build_info`, and
    /// `self.features` for the subsequent build steps. Requires variables to
    /// have been expanded (`VariableParsed`) so feature refs and paths are final.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch directory so successive runs do not collide.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // Used as a build context when no real context is needed.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        // The image tag is derived from the Dockerfile path (see
        // generate_features_image_tag).
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        // No features configured is equivalent to an empty feature map.
        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // _CONTAINER_USER/_REMOTE_USER are exposed to feature install scripts
        // via the builtin env file.
        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        // For each enabled feature: download its OCI layer, materialize its
        // env + install wrapper, and record its manifest.
        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` disables a feature without removing it.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Suffix with the install index so repeated feature ids stay unique.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            // Registry auth token for the feature's repository.
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is downloaded — feature artifacts are
            // distributed as a single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata also supports the non-remote substitution vars.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Materialize the feature's option env file, then wrap its install
            // script so the env is sourced before installation.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // Non-compose builds always use BuildKit; compose builds only when the
        // client reports support.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // A missing/unreadable user Dockerfile is logged and treated as empty.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the user's (target) stage so the extended stages can build on it.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
570
    /// Renders the extended Dockerfile: the user's Dockerfile content (if any),
    /// stages that copy feature content into the image, and one install layer
    /// per downloaded feature.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is never attempted on Windows hosts; elsewhere it
        // defaults to enabled unless the config opts out.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell commands that resolve each user's passwd entry (home dir lookup).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit there is no `--build-context`, so feature content is
        // provided through an extra named stage instead.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): the first ENV line is appended without a leading
            // newline; this relies on the preceding fragment ending in a
            // newline — confirm `generate_dockerfile_env` guarantees that.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
662
663 fn build_merged_resources(
664 &self,
665 base_image: DockerInspect,
666 ) -> Result<DockerBuildResources, DevContainerError> {
667 let dev_container = match &self.config {
668 ConfigStatus::Deserialized(_) => {
669 log::error!(
670 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
671 );
672 return Err(DevContainerError::DevContainerParseFailed);
673 }
674 ConfigStatus::VariableParsed(dev_container) => dev_container,
675 };
676 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
677
678 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
679
680 mounts.append(&mut feature_mounts);
681
682 let privileged = dev_container.privileged.unwrap_or(false)
683 || self.features.iter().any(|f| f.privileged());
684
685 let mut entrypoint_script_lines = vec![
686 "echo Container started".to_string(),
687 "trap \"exit 0\" 15".to_string(),
688 ];
689
690 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
691 entrypoint_script_lines.push(entrypoint.clone());
692 }
693 entrypoint_script_lines.append(&mut vec![
694 "exec \"$@\"".to_string(),
695 "while sleep 1 & wait $!; do :; done".to_string(),
696 ]);
697
698 Ok(DockerBuildResources {
699 image: base_image,
700 additional_mounts: mounts,
701 privileged,
702 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
703 })
704 }
705
706 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
707 if let ConfigStatus::Deserialized(_) = &self.config {
708 log::error!(
709 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
710 );
711 return Err(DevContainerError::DevContainerParseFailed);
712 }
713 let dev_container = self.dev_container();
714 match dev_container.build_type() {
715 DevContainerBuildType::Image(base_image) => {
716 let built_docker_image = self.build_docker_image().await?;
717
718 let built_docker_image = self
719 .update_remote_user_uid(built_docker_image, &base_image)
720 .await?;
721
722 let resources = self.build_merged_resources(built_docker_image)?;
723 Ok(DevContainerBuildResources::Docker(resources))
724 }
725 DevContainerBuildType::Dockerfile(_) => {
726 let built_docker_image = self.build_docker_image().await?;
727 let Some(features_build_info) = &self.features_build_info else {
728 log::error!(
729 "Can't attempt to build update UID dockerfile before initial docker build"
730 );
731 return Err(DevContainerError::DevContainerParseFailed);
732 };
733 let built_docker_image = self
734 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
735 .await?;
736
737 let resources = self.build_merged_resources(built_docker_image)?;
738 Ok(DevContainerBuildResources::Docker(resources))
739 }
740 DevContainerBuildType::DockerCompose => {
741 log::debug!("Using docker compose. Building extended compose files");
742 let docker_compose_resources = self.build_and_extend_compose_files().await?;
743
744 return Ok(DevContainerBuildResources::DockerCompose(
745 docker_compose_resources,
746 ));
747 }
748 DevContainerBuildType::None => {
749 return Err(DevContainerError::DevContainerParseFailed);
750 }
751 }
752 }
753
754 async fn run_dev_container(
755 &self,
756 build_resources: DevContainerBuildResources,
757 ) -> Result<DevContainerUp, DevContainerError> {
758 let ConfigStatus::VariableParsed(_) = &self.config else {
759 log::error!(
760 "Variables have not been parsed; cannot proceed with running the dev container"
761 );
762 return Err(DevContainerError::DevContainerParseFailed);
763 };
764 let running_container = match build_resources {
765 DevContainerBuildResources::DockerCompose(resources) => {
766 self.run_docker_compose(resources).await?
767 }
768 DevContainerBuildResources::Docker(resources) => {
769 self.run_docker_image(resources).await?
770 }
771 };
772
773 let remote_user = get_remote_user_from_config(&running_container, self)?;
774 let remote_workspace_folder = self.remote_workspace_folder()?;
775
776 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
777
778 Ok(DevContainerUp {
779 container_id: running_container.id,
780 remote_user,
781 remote_workspace_folder: remote_workspace_folder.display().to_string(),
782 extension_ids: self.extension_ids(),
783 remote_env,
784 })
785 }
786
787 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
788 let dev_container = match &self.config {
789 ConfigStatus::Deserialized(_) => {
790 log::error!(
791 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
792 );
793 return Err(DevContainerError::DevContainerParseFailed);
794 }
795 ConfigStatus::VariableParsed(dev_container) => dev_container,
796 };
797 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
798 return Err(DevContainerError::DevContainerParseFailed);
799 };
800 let docker_compose_full_paths = docker_compose_files
801 .iter()
802 .map(|relative| self.config_directory.join(relative))
803 .collect::<Vec<PathBuf>>();
804
805 let Some(config) = self
806 .docker_client
807 .get_docker_compose_config(&docker_compose_full_paths)
808 .await?
809 else {
810 log::error!("Output could not deserialize into DockerComposeConfig");
811 return Err(DevContainerError::DevContainerParseFailed);
812 };
813 Ok(DockerComposeResources {
814 files: docker_compose_full_paths,
815 config,
816 })
817 }
818
819 async fn build_and_extend_compose_files(
820 &self,
821 ) -> Result<DockerComposeResources, DevContainerError> {
822 let dev_container = match &self.config {
823 ConfigStatus::Deserialized(_) => {
824 log::error!(
825 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
826 );
827 return Err(DevContainerError::DevContainerParseFailed);
828 }
829 ConfigStatus::VariableParsed(dev_container) => dev_container,
830 };
831
832 let Some(features_build_info) = &self.features_build_info else {
833 log::error!(
834 "Cannot build and extend compose files: features build info is not yet constructed"
835 );
836 return Err(DevContainerError::DevContainerParseFailed);
837 };
838 let mut docker_compose_resources = self.docker_compose_manifest().await?;
839 let supports_buildkit = self.docker_client.supports_compose_buildkit();
840
841 let (main_service_name, main_service) =
842 find_primary_service(&docker_compose_resources, self)?;
843 let (built_service_image, built_service_image_tag) = if main_service
844 .build
845 .as_ref()
846 .map(|b| b.dockerfile.as_ref())
847 .is_some()
848 {
849 if !supports_buildkit {
850 self.build_feature_content_image().await?;
851 }
852
853 let dockerfile_path = &features_build_info.dockerfile_path;
854
855 let build_args = if !supports_buildkit {
856 HashMap::from([
857 (
858 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
859 "dev_container_auto_added_stage_label".to_string(),
860 ),
861 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
862 ])
863 } else {
864 HashMap::from([
865 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
866 (
867 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
868 "dev_container_auto_added_stage_label".to_string(),
869 ),
870 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
871 ])
872 };
873
874 let additional_contexts = if !supports_buildkit {
875 None
876 } else {
877 Some(HashMap::from([(
878 "dev_containers_feature_content_source".to_string(),
879 features_build_info
880 .features_content_dir
881 .display()
882 .to_string(),
883 )]))
884 };
885
886 let build_override = DockerComposeConfig {
887 name: None,
888 services: HashMap::from([(
889 main_service_name.clone(),
890 DockerComposeService {
891 image: Some(features_build_info.image_tag.clone()),
892 entrypoint: None,
893 cap_add: None,
894 security_opt: None,
895 labels: None,
896 build: Some(DockerComposeServiceBuild {
897 context: Some(
898 main_service
899 .build
900 .as_ref()
901 .and_then(|b| b.context.clone())
902 .unwrap_or_else(|| {
903 features_build_info.empty_context_dir.display().to_string()
904 }),
905 ),
906 dockerfile: Some(dockerfile_path.display().to_string()),
907 target: Some("dev_containers_target_stage".to_string()),
908 args: Some(build_args),
909 additional_contexts,
910 }),
911 volumes: Vec::new(),
912 ..Default::default()
913 },
914 )]),
915 volumes: HashMap::new(),
916 };
917
918 let temp_base = std::env::temp_dir().join("devcontainer-zed");
919 let config_location = temp_base.join("docker_compose_build.json");
920
921 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
922 log::error!("Error serializing docker compose runtime override: {e}");
923 DevContainerError::DevContainerParseFailed
924 })?;
925
926 self.fs
927 .write(&config_location, config_json.as_bytes())
928 .await
929 .map_err(|e| {
930 log::error!("Error writing the runtime override file: {e}");
931 DevContainerError::FilesystemError
932 })?;
933
934 docker_compose_resources.files.push(config_location);
935
936 self.docker_client
937 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
938 .await?;
939 (
940 self.docker_client
941 .inspect(&features_build_info.image_tag)
942 .await?,
943 &features_build_info.image_tag,
944 )
945 } else if let Some(image) = &main_service.image {
946 if dev_container
947 .features
948 .as_ref()
949 .is_none_or(|features| features.is_empty())
950 {
951 (self.docker_client.inspect(image).await?, image)
952 } else {
953 if !supports_buildkit {
954 self.build_feature_content_image().await?;
955 }
956
957 let dockerfile_path = &features_build_info.dockerfile_path;
958
959 let build_args = if !supports_buildkit {
960 HashMap::from([
961 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
962 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
963 ])
964 } else {
965 HashMap::from([
966 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
967 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
968 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
969 ])
970 };
971
972 let additional_contexts = if !supports_buildkit {
973 None
974 } else {
975 Some(HashMap::from([(
976 "dev_containers_feature_content_source".to_string(),
977 features_build_info
978 .features_content_dir
979 .display()
980 .to_string(),
981 )]))
982 };
983
984 let build_override = DockerComposeConfig {
985 name: None,
986 services: HashMap::from([(
987 main_service_name.clone(),
988 DockerComposeService {
989 image: Some(features_build_info.image_tag.clone()),
990 entrypoint: None,
991 cap_add: None,
992 security_opt: None,
993 labels: None,
994 build: Some(DockerComposeServiceBuild {
995 context: Some(
996 features_build_info.empty_context_dir.display().to_string(),
997 ),
998 dockerfile: Some(dockerfile_path.display().to_string()),
999 target: Some("dev_containers_target_stage".to_string()),
1000 args: Some(build_args),
1001 additional_contexts,
1002 }),
1003 volumes: Vec::new(),
1004 ..Default::default()
1005 },
1006 )]),
1007 volumes: HashMap::new(),
1008 };
1009
1010 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1011 let config_location = temp_base.join("docker_compose_build.json");
1012
1013 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1014 log::error!("Error serializing docker compose runtime override: {e}");
1015 DevContainerError::DevContainerParseFailed
1016 })?;
1017
1018 self.fs
1019 .write(&config_location, config_json.as_bytes())
1020 .await
1021 .map_err(|e| {
1022 log::error!("Error writing the runtime override file: {e}");
1023 DevContainerError::FilesystemError
1024 })?;
1025
1026 docker_compose_resources.files.push(config_location);
1027
1028 self.docker_client
1029 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1030 .await?;
1031
1032 (
1033 self.docker_client
1034 .inspect(&features_build_info.image_tag)
1035 .await?,
1036 &features_build_info.image_tag,
1037 )
1038 }
1039 } else {
1040 log::error!("Docker compose must have either image or dockerfile defined");
1041 return Err(DevContainerError::DevContainerParseFailed);
1042 };
1043
1044 let built_service_image = self
1045 .update_remote_user_uid(built_service_image, built_service_image_tag)
1046 .await?;
1047
1048 let resources = self.build_merged_resources(built_service_image)?;
1049
1050 let network_mode = main_service.network_mode.as_ref();
1051 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1052 let runtime_override_file = self
1053 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1054 .await?;
1055
1056 docker_compose_resources.files.push(runtime_override_file);
1057
1058 Ok(docker_compose_resources)
1059 }
1060
1061 async fn write_runtime_override_file(
1062 &self,
1063 main_service_name: &str,
1064 network_mode_service: Option<&str>,
1065 resources: DockerBuildResources,
1066 ) -> Result<PathBuf, DevContainerError> {
1067 let config =
1068 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1069 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1070 let config_location = temp_base.join("docker_compose_runtime.json");
1071
1072 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1073 log::error!("Error serializing docker compose runtime override: {e}");
1074 DevContainerError::DevContainerParseFailed
1075 })?;
1076
1077 self.fs
1078 .write(&config_location, config_json.as_bytes())
1079 .await
1080 .map_err(|e| {
1081 log::error!("Error writing the runtime override file: {e}");
1082 DevContainerError::FilesystemError
1083 })?;
1084
1085 Ok(config_location)
1086 }
1087
1088 fn build_runtime_override(
1089 &self,
1090 main_service_name: &str,
1091 network_mode_service: Option<&str>,
1092 resources: DockerBuildResources,
1093 ) -> Result<DockerComposeConfig, DevContainerError> {
1094 let mut runtime_labels = HashMap::new();
1095
1096 if let Some(metadata) = &resources.image.config.labels.metadata {
1097 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1098 log::error!("Error serializing docker image metadata: {e}");
1099 DevContainerError::ContainerNotValid(resources.image.id.clone())
1100 })?;
1101
1102 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1103 }
1104
1105 for (k, v) in self.identifying_labels() {
1106 runtime_labels.insert(k.to_string(), v.to_string());
1107 }
1108
1109 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1110 .additional_mounts
1111 .iter()
1112 .filter_map(|mount| {
1113 if let Some(mount_type) = &mount.mount_type
1114 && mount_type.to_lowercase() == "volume"
1115 && let Some(source) = &mount.source
1116 {
1117 Some((
1118 source.clone(),
1119 DockerComposeVolume {
1120 name: source.clone(),
1121 },
1122 ))
1123 } else {
1124 None
1125 }
1126 })
1127 .collect();
1128
1129 let volumes: Vec<MountDefinition> = resources
1130 .additional_mounts
1131 .iter()
1132 .map(|v| MountDefinition {
1133 source: v.source.clone(),
1134 target: v.target.clone(),
1135 mount_type: v.mount_type.clone(),
1136 })
1137 .collect();
1138
1139 let mut main_service = DockerComposeService {
1140 entrypoint: Some(vec![
1141 "/bin/sh".to_string(),
1142 "-c".to_string(),
1143 resources.entrypoint_script,
1144 "-".to_string(),
1145 ]),
1146 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1147 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1148 labels: Some(runtime_labels),
1149 volumes,
1150 privileged: Some(resources.privileged),
1151 ..Default::default()
1152 };
1153 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1154 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1155 if let Some(forward_ports) = &self.dev_container().forward_ports {
1156 let main_service_ports: Vec<String> = forward_ports
1157 .iter()
1158 .filter_map(|f| match f {
1159 ForwardPort::Number(port) => Some(port.to_string()),
1160 ForwardPort::String(port) => {
1161 let parts: Vec<&str> = port.split(":").collect();
1162 if parts.len() <= 1 {
1163 Some(port.to_string())
1164 } else if parts.len() == 2 {
1165 if parts[0] == main_service_name {
1166 Some(parts[1].to_string())
1167 } else {
1168 None
1169 }
1170 } else {
1171 None
1172 }
1173 }
1174 })
1175 .collect();
1176 for port in main_service_ports {
1177 // If the main service uses a different service's network bridge, append to that service's ports instead
1178 if let Some(network_service_name) = network_mode_service {
1179 if let Some(service) = service_declarations.get_mut(network_service_name) {
1180 service.ports.push(DockerComposeServicePort {
1181 target: port.clone(),
1182 published: port.clone(),
1183 ..Default::default()
1184 });
1185 } else {
1186 service_declarations.insert(
1187 network_service_name.to_string(),
1188 DockerComposeService {
1189 ports: vec![DockerComposeServicePort {
1190 target: port.clone(),
1191 published: port.clone(),
1192 ..Default::default()
1193 }],
1194 ..Default::default()
1195 },
1196 );
1197 }
1198 } else {
1199 main_service.ports.push(DockerComposeServicePort {
1200 target: port.clone(),
1201 published: port.clone(),
1202 ..Default::default()
1203 });
1204 }
1205 }
1206 let other_service_ports: Vec<(&str, &str)> = forward_ports
1207 .iter()
1208 .filter_map(|f| match f {
1209 ForwardPort::Number(_) => None,
1210 ForwardPort::String(port) => {
1211 let parts: Vec<&str> = port.split(":").collect();
1212 if parts.len() != 2 {
1213 None
1214 } else {
1215 if parts[0] == main_service_name {
1216 None
1217 } else {
1218 Some((parts[0], parts[1]))
1219 }
1220 }
1221 }
1222 })
1223 .collect();
1224 for (service_name, port) in other_service_ports {
1225 if let Some(service) = service_declarations.get_mut(service_name) {
1226 service.ports.push(DockerComposeServicePort {
1227 target: port.to_string(),
1228 published: port.to_string(),
1229 ..Default::default()
1230 });
1231 } else {
1232 service_declarations.insert(
1233 service_name.to_string(),
1234 DockerComposeService {
1235 ports: vec![DockerComposeServicePort {
1236 target: port.to_string(),
1237 published: port.to_string(),
1238 ..Default::default()
1239 }],
1240 ..Default::default()
1241 },
1242 );
1243 }
1244 }
1245 }
1246
1247 service_declarations.insert(main_service_name.to_string(), main_service);
1248 let new_docker_compose_config = DockerComposeConfig {
1249 name: None,
1250 services: service_declarations,
1251 volumes: config_volumes,
1252 };
1253
1254 Ok(new_docker_compose_config)
1255 }
1256
1257 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1258 let dev_container = match &self.config {
1259 ConfigStatus::Deserialized(_) => {
1260 log::error!(
1261 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1262 );
1263 return Err(DevContainerError::DevContainerParseFailed);
1264 }
1265 ConfigStatus::VariableParsed(dev_container) => dev_container,
1266 };
1267
1268 match dev_container.build_type() {
1269 DevContainerBuildType::Image(image_tag) => {
1270 let base_image = self.docker_client.inspect(&image_tag).await?;
1271 if dev_container
1272 .features
1273 .as_ref()
1274 .is_none_or(|features| features.is_empty())
1275 {
1276 log::debug!("No features to add. Using base image");
1277 return Ok(base_image);
1278 }
1279 }
1280 DevContainerBuildType::Dockerfile(_) => {}
1281 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1282 return Err(DevContainerError::DevContainerParseFailed);
1283 }
1284 };
1285
1286 let mut command = self.create_docker_build()?;
1287
1288 let output = self
1289 .command_runner
1290 .run_command(&mut command)
1291 .await
1292 .map_err(|e| {
1293 log::error!("Error building docker image: {e}");
1294 DevContainerError::CommandFailed(command.get_program().display().to_string())
1295 })?;
1296
1297 if !output.status.success() {
1298 let stderr = String::from_utf8_lossy(&output.stderr);
1299 log::error!("docker buildx build failed: {stderr}");
1300 return Err(DevContainerError::CommandFailed(
1301 command.get_program().display().to_string(),
1302 ));
1303 }
1304
1305 // After a successful build, inspect the newly tagged image to get its metadata
1306 let Some(features_build_info) = &self.features_build_info else {
1307 log::error!("Features build info expected, but not created");
1308 return Err(DevContainerError::DevContainerParseFailed);
1309 };
1310 let image = self
1311 .docker_client
1312 .inspect(&features_build_info.image_tag)
1313 .await?;
1314
1315 Ok(image)
1316 }
1317
    /// Windows host variant: there is no host UID/GID to align the container
    /// user with, so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1326 #[cfg(not(target_os = "windows"))]
1327 async fn update_remote_user_uid(
1328 &self,
1329 image: DockerInspect,
1330 base_image: &str,
1331 ) -> Result<DockerInspect, DevContainerError> {
1332 let dev_container = self.dev_container();
1333
1334 let Some(features_build_info) = &self.features_build_info else {
1335 return Ok(image);
1336 };
1337
1338 // updateRemoteUserUID defaults to true per the devcontainers spec
1339 if dev_container.update_remote_user_uid == Some(false) {
1340 return Ok(image);
1341 }
1342
1343 let remote_user = get_remote_user_from_config(&image, self)?;
1344 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1345 return Ok(image);
1346 }
1347
1348 let image_user = image
1349 .config
1350 .image_user
1351 .as_deref()
1352 .unwrap_or("root")
1353 .to_string();
1354
1355 let host_uid = Command::new("id")
1356 .arg("-u")
1357 .output()
1358 .await
1359 .map_err(|e| {
1360 log::error!("Failed to get host UID: {e}");
1361 DevContainerError::CommandFailed("id -u".to_string())
1362 })
1363 .and_then(|output| {
1364 String::from_utf8_lossy(&output.stdout)
1365 .trim()
1366 .parse::<u32>()
1367 .map_err(|e| {
1368 log::error!("Failed to parse host UID: {e}");
1369 DevContainerError::CommandFailed("id -u".to_string())
1370 })
1371 })?;
1372
1373 let host_gid = Command::new("id")
1374 .arg("-g")
1375 .output()
1376 .await
1377 .map_err(|e| {
1378 log::error!("Failed to get host GID: {e}");
1379 DevContainerError::CommandFailed("id -g".to_string())
1380 })
1381 .and_then(|output| {
1382 String::from_utf8_lossy(&output.stdout)
1383 .trim()
1384 .parse::<u32>()
1385 .map_err(|e| {
1386 log::error!("Failed to parse host GID: {e}");
1387 DevContainerError::CommandFailed("id -g".to_string())
1388 })
1389 })?;
1390
1391 let dockerfile_content = self.generate_update_uid_dockerfile();
1392
1393 let dockerfile_path = features_build_info
1394 .features_content_dir
1395 .join("updateUID.Dockerfile");
1396 self.fs
1397 .write(&dockerfile_path, dockerfile_content.as_bytes())
1398 .await
1399 .map_err(|e| {
1400 log::error!("Failed to write updateUID Dockerfile: {e}");
1401 DevContainerError::FilesystemError
1402 })?;
1403
1404 let updated_image_tag = features_build_info.image_tag.clone();
1405
1406 let mut command = Command::new(self.docker_client.docker_cli());
1407 command.args(["build"]);
1408 command.args(["-f", &dockerfile_path.display().to_string()]);
1409 command.args(["-t", &updated_image_tag]);
1410 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1411 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1412 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1413 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1414 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1415 command.arg(features_build_info.empty_context_dir.display().to_string());
1416
1417 let output = self
1418 .command_runner
1419 .run_command(&mut command)
1420 .await
1421 .map_err(|e| {
1422 log::error!("Error building UID update image: {e}");
1423 DevContainerError::CommandFailed(command.get_program().display().to_string())
1424 })?;
1425
1426 if !output.status.success() {
1427 let stderr = String::from_utf8_lossy(&output.stderr);
1428 log::error!("UID update build failed: {stderr}");
1429 return Err(DevContainerError::CommandFailed(
1430 command.get_program().display().to_string(),
1431 ));
1432 }
1433
1434 self.docker_client.inspect(&updated_image_tag).await
1435 }
1436
    /// Generates the Dockerfile used by `update_remote_user_uid` to rewrite the
    /// remote user's UID/GID inside the image so it matches the host user.
    ///
    /// The embedded shell script edits /etc/passwd and /etc/group in place,
    /// reassigning any group that already occupies the target GID, then chowns
    /// the user's home folder. Afterwards each feature's container-env layer
    /// and the devcontainer's `containerEnv` entries are re-applied so they
    /// survive the extra image layer.
    #[cfg(not(target_os = "windows"))]
    fn generate_update_uid_dockerfile(&self) -> String {
        let mut dockerfile = r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#.to_string();
        // Re-apply each feature's container-env layer on top of the UID rewrite.
        for feature in &self.features {
            let container_env_layer = feature.generate_dockerfile_env();
            dockerfile = format!("{dockerfile}\n{container_env_layer}");
        }

        // NOTE(review): unlike the feature layers above, these ENV lines are
        // appended without a preceding newline — this relies on the preceding
        // content ending in "\n" (true for the base string; confirm
        // `generate_dockerfile_env` output also ends with a newline).
        if let Some(env) = &self.dev_container().container_env {
            for (key, value) in env {
                dockerfile = format!("{dockerfile}ENV {key}={value}\n");
            }
        }
        dockerfile
    }
1490
1491 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1492 let Some(features_build_info) = &self.features_build_info else {
1493 log::error!("Features build info not available for building feature content image");
1494 return Err(DevContainerError::DevContainerParseFailed);
1495 };
1496 let features_content_dir = &features_build_info.features_content_dir;
1497
1498 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1499 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1500
1501 self.fs
1502 .write(&dockerfile_path, dockerfile_content.as_bytes())
1503 .await
1504 .map_err(|e| {
1505 log::error!("Failed to write feature content Dockerfile: {e}");
1506 DevContainerError::FilesystemError
1507 })?;
1508
1509 let mut command = Command::new(self.docker_client.docker_cli());
1510 command.args([
1511 "build",
1512 "-t",
1513 "dev_container_feature_content_temp",
1514 "-f",
1515 &dockerfile_path.display().to_string(),
1516 &features_content_dir.display().to_string(),
1517 ]);
1518
1519 let output = self
1520 .command_runner
1521 .run_command(&mut command)
1522 .await
1523 .map_err(|e| {
1524 log::error!("Error building feature content image: {e}");
1525 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1526 })?;
1527
1528 if !output.status.success() {
1529 let stderr = String::from_utf8_lossy(&output.stderr);
1530 log::error!("Feature content image build failed: {stderr}");
1531 return Err(DevContainerError::CommandFailed(
1532 self.docker_client.docker_cli(),
1533 ));
1534 }
1535
1536 Ok(())
1537 }
1538
    /// Constructs the `docker buildx build` command for an image/Dockerfile
    /// config that has features, mirroring the devcontainers CLI's
    /// `getFeaturesBuildOptions`.
    ///
    /// Requires the config to be variable-expanded and the features build info
    /// (extended Dockerfile, feature content dir, target tag) to already exist.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // User the extended Dockerfile switches back to after feature installs;
        // falls back to "root" when the base image metadata names none.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // User-supplied build args from devcontainer.json's `build.args`.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if let DevContainerBuildType::Dockerfile(_) = dev_container.build_type() {
            command.arg(self.config_directory.display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1627
1628 async fn run_docker_compose(
1629 &self,
1630 resources: DockerComposeResources,
1631 ) -> Result<DockerInspect, DevContainerError> {
1632 let mut command = Command::new(self.docker_client.docker_cli());
1633 command.args(&["compose", "--project-name", &self.project_name()]);
1634 for docker_compose_file in resources.files {
1635 command.args(&["-f", &docker_compose_file.display().to_string()]);
1636 }
1637 command.args(&["up", "-d"]);
1638
1639 let output = self
1640 .command_runner
1641 .run_command(&mut command)
1642 .await
1643 .map_err(|e| {
1644 log::error!("Error running docker compose up: {e}");
1645 DevContainerError::CommandFailed(command.get_program().display().to_string())
1646 })?;
1647
1648 if !output.status.success() {
1649 let stderr = String::from_utf8_lossy(&output.stderr);
1650 log::error!("Non-success status from docker compose up: {}", stderr);
1651 return Err(DevContainerError::CommandFailed(
1652 command.get_program().display().to_string(),
1653 ));
1654 }
1655
1656 if let Some(docker_ps) = self.check_for_existing_container().await? {
1657 log::debug!("Found newly created dev container");
1658 return self.docker_client.inspect(&docker_ps.id).await;
1659 }
1660
1661 log::error!("Could not find existing container after docker compose up");
1662
1663 Err(DevContainerError::DevContainerParseFailed)
1664 }
1665
1666 async fn run_docker_image(
1667 &self,
1668 build_resources: DockerBuildResources,
1669 ) -> Result<DockerInspect, DevContainerError> {
1670 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1671
1672 let output = self
1673 .command_runner
1674 .run_command(&mut docker_run_command)
1675 .await
1676 .map_err(|e| {
1677 log::error!("Error running docker run: {e}");
1678 DevContainerError::CommandFailed(
1679 docker_run_command.get_program().display().to_string(),
1680 )
1681 })?;
1682
1683 if !output.status.success() {
1684 let std_err = String::from_utf8_lossy(&output.stderr);
1685 log::error!("Non-success status from docker run. StdErr: {std_err}");
1686 return Err(DevContainerError::CommandFailed(
1687 docker_run_command.get_program().display().to_string(),
1688 ));
1689 }
1690
1691 log::debug!("Checking for container that was started");
1692 let Some(docker_ps) = self.check_for_existing_container().await? else {
1693 log::error!("Could not locate container just created");
1694 return Err(DevContainerError::DevContainerParseFailed);
1695 };
1696 self.docker_client.inspect(&docker_ps.id).await
1697 }
1698
    /// Host-side project directory rendered as a display string.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1702 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1703 self.local_project_directory
1704 .file_name()
1705 .map(|f| f.display().to_string())
1706 .ok_or(DevContainerError::DevContainerParseFailed)
1707 }
1708
1709 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1710 self.dev_container()
1711 .workspace_folder
1712 .as_ref()
1713 .map(|folder| PathBuf::from(folder))
1714 .or(Some(
1715 // We explicitly use "/" here, instead of PathBuf::join
1716 // because we want remote targets to use unix-style filepaths,
1717 // even on a Windows host
1718 PathBuf::from(format!(
1719 "{}/{}",
1720 DEFAULT_REMOTE_PROJECT_DIR,
1721 self.local_workspace_base_name()?
1722 )),
1723 ))
1724 .ok_or(DevContainerError::DevContainerParseFailed)
1725 }
1726 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1727 self.remote_workspace_folder().and_then(|f| {
1728 f.file_name()
1729 .map(|file_name| file_name.display().to_string())
1730 .ok_or(DevContainerError::DevContainerParseFailed)
1731 })
1732 }
1733
1734 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1735 if let Some(mount) = &self.dev_container().workspace_mount {
1736 return Ok(mount.clone());
1737 }
1738 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1739 return Err(DevContainerError::DevContainerParseFailed);
1740 };
1741
1742 Ok(MountDefinition {
1743 source: Some(self.local_workspace_folder()),
1744 // We explicitly use "/" here, instead of PathBuf::join
1745 // because we want the remote target to use unix-style filepaths,
1746 // even on a Windows host
1747 target: format!(
1748 "{}/{}",
1749 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1750 project_directory_name.display()
1751 ),
1752 mount_type: None,
1753 })
1754 }
1755
    /// Assembles the `docker run` command that starts the dev container:
    /// user `runArgs`, workspace + additional mounts, identifying/metadata
    /// labels, forwarded and app ports, and a `/bin/sh -c` entrypoint running
    /// the generated entrypoint script. Argument order matters to the docker
    /// CLI, so statements here are intentionally sequential.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // User-provided runArgs go first so the defaults below can detect and
        // avoid duplicating them.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        for arg in run_args {
            command.arg(arg);
        }

        // Appends `arg` only when no user runArg already starts with `arg_name`.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        if &docker_cli == "podman" {
            // Podman-specific defaults: disable SELinux labeling on mounts and
            // keep the host user's id mapping, unless the user overrode them.
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to find this container again
        // (see `check_for_existing_container`).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's merged devcontainer metadata onto the container.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only numeric forwardPorts are published here ("service:port" strings
        // are handled by the compose path in build_runtime_override).
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Everything after the image id is the in-container command: run the
        // entrypoint script through /bin/sh -c.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1850
1851 fn extension_ids(&self) -> Vec<String> {
1852 self.dev_container()
1853 .customizations
1854 .as_ref()
1855 .map(|c| c.zed.extensions.clone())
1856 .unwrap_or_default()
1857 }
1858
1859 async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
1860 self.dev_container().validate_devcontainer_contents()?;
1861
1862 self.run_initialize_commands().await?;
1863
1864 self.download_feature_and_dockerfile_resources().await?;
1865
1866 let build_resources = self.build_resources().await?;
1867
1868 let devcontainer_up = self.run_dev_container(build_resources).await?;
1869
1870 self.run_remote_scripts(&devcontainer_up, true).await?;
1871
1872 Ok(devcontainer_up)
1873 }
1874
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// `docker exec`, in spec order: onCreate, updateContent, postCreate,
    /// postStart (all gated on `new_container`), then postAttach (always).
    ///
    /// NOTE(review): the devcontainers spec runs `postStartCommand` on every
    /// container start, but here it only runs when `new_container` is true —
    /// confirm whether restarted containers should also get it.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // All scripts execute with the remote workspace folder as the cwd.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs on every attach, including reuse of an existing container.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1962
1963 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1964 let ConfigStatus::VariableParsed(config) = &self.config else {
1965 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1966 return Err(DevContainerError::DevContainerParseFailed);
1967 };
1968
1969 if let Some(initialize_command) = &config.initialize_command {
1970 log::debug!("Running initialize command");
1971 initialize_command
1972 .run(&self.command_runner, &self.local_project_directory)
1973 .await
1974 } else {
1975 log::warn!("No initialize command found");
1976 Ok(())
1977 }
1978 }
1979
1980 async fn check_for_existing_devcontainer(
1981 &self,
1982 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1983 if let Some(docker_ps) = self.check_for_existing_container().await? {
1984 log::debug!("Dev container already found. Proceeding with it");
1985
1986 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1987
1988 if !docker_inspect.is_running() {
1989 log::debug!("Container not running. Will attempt to start, and then proceed");
1990 self.docker_client.start_container(&docker_ps.id).await?;
1991 }
1992
1993 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1994
1995 let remote_folder = self.remote_workspace_folder()?;
1996
1997 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1998
1999 let dev_container_up = DevContainerUp {
2000 container_id: docker_ps.id,
2001 remote_user: remote_user,
2002 remote_workspace_folder: remote_folder.display().to_string(),
2003 extension_ids: self.extension_ids(),
2004 remote_env,
2005 };
2006
2007 self.run_remote_scripts(&dev_container_up, false).await?;
2008
2009 Ok(Some(dev_container_up))
2010 } else {
2011 log::debug!("Existing container not found.");
2012
2013 Ok(None)
2014 }
2015 }
2016
2017 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2018 self.docker_client
2019 .find_process_by_filters(
2020 self.identifying_labels()
2021 .iter()
2022 .map(|(k, v)| format!("label={k}={v}"))
2023 .collect(),
2024 )
2025 .await
2026 }
2027
2028 fn project_name(&self) -> String {
2029 if let Some(name) = &self.dev_container().name {
2030 safe_id_lower(name)
2031 } else {
2032 let alternate_name = &self
2033 .local_workspace_base_name()
2034 .unwrap_or(self.local_workspace_folder());
2035 safe_id_lower(alternate_name)
2036 }
2037 }
2038
    /// Loads the config's Dockerfile and expands `${KEY}` references in place,
    /// producing the effective Dockerfile contents.
    ///
    /// Substitution sources, in precedence order:
    /// 1. `build.args` from the devcontainer config
    /// 2. `ARG key=value` defaults declared earlier in the Dockerfile itself
    ///
    /// Errors when the config is image-based (no Dockerfile) or the file
    /// cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // ARG defaults seen so far; later lines may reference them.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches `key=` at the start of a directive or after whitespace; used
        // to split a single `ARG a=1 b=2` line into key/value pairs.
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            // Collect defaults from `ARG` lines for use on subsequent lines.
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // Each value spans from the end of its `key=` to the start
                    // of the next `key=` (or the end of the line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2097}
2098
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    /// (keeps the feature-extension build independent of the project tree).
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// `None` presumably means the base comes from the generated Dockerfile
    /// itself — TODO confirm against the build-command constructor.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2117
2118pub(crate) async fn read_devcontainer_configuration(
2119 config: DevContainerConfig,
2120 context: &DevContainerContext,
2121 environment: HashMap<String, String>,
2122) -> Result<DevContainer, DevContainerError> {
2123 let docker = if context.use_podman {
2124 Docker::new("podman").await
2125 } else {
2126 Docker::new("docker").await
2127 };
2128 let mut dev_container = DevContainerManifest::new(
2129 context,
2130 environment,
2131 Arc::new(docker),
2132 Arc::new(DefaultCommandRunner::new()),
2133 config,
2134 &context.project_directory.as_ref(),
2135 )
2136 .await?;
2137 dev_container.parse_nonremote_vars()?;
2138 Ok(dev_container.dev_container().clone())
2139}
2140
2141pub(crate) async fn spawn_dev_container(
2142 context: &DevContainerContext,
2143 environment: HashMap<String, String>,
2144 config: DevContainerConfig,
2145 local_project_path: &Path,
2146) -> Result<DevContainerUp, DevContainerError> {
2147 let docker = if context.use_podman {
2148 Docker::new("podman").await
2149 } else {
2150 Docker::new("docker").await
2151 };
2152 let mut devcontainer_manifest = DevContainerManifest::new(
2153 context,
2154 environment,
2155 Arc::new(docker),
2156 Arc::new(DefaultCommandRunner::new()),
2157 config,
2158 local_project_path,
2159 )
2160 .await?;
2161
2162 devcontainer_manifest.parse_nonremote_vars()?;
2163
2164 log::debug!("Checking for existing container");
2165 if let Some(devcontainer) = devcontainer_manifest
2166 .check_for_existing_devcontainer()
2167 .await?
2168 {
2169 Ok(devcontainer)
2170 } else {
2171 log::debug!("Existing container not found. Building");
2172
2173 devcontainer_manifest.build_and_run().await
2174 }
2175}
2176
/// Inputs required to create a single-container (non-compose) dev container
/// via `docker run`.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be created from.
    image: DockerInspect,
    /// Extra mounts to apply beyond the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should run privileged — presumably mapped to a
    /// `--privileged` flag; confirm against the run-command builder.
    privileged: bool,
    /// Shell script the container's entrypoint is overridden with
    /// (passed via `/bin/sh -c`, keeps the container alive).
    entrypoint_script: String,
}
2184
/// Build-path selector: a dev container is backed either by a Docker Compose
/// project or by a single `docker run` container.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2190
2191fn find_primary_service(
2192 docker_compose: &DockerComposeResources,
2193 devcontainer: &DevContainerManifest,
2194) -> Result<(String, DockerComposeService), DevContainerError> {
2195 let Some(service_name) = &devcontainer.dev_container().service else {
2196 return Err(DevContainerError::DevContainerParseFailed);
2197 };
2198
2199 match docker_compose.config.services.get(service_name) {
2200 Some(service) => Ok((service_name.clone(), service.clone())),
2201 None => Err(DevContainerError::DevContainerParseFailed),
2202 }
2203}
2204
2205/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2206/// `dockerfile` is relative to the build `context`, and `context` is relative to
2207/// the compose file's directory.
2208fn resolve_compose_dockerfile(
2209 compose_file: &Path,
2210 context: Option<&str>,
2211 dockerfile: &str,
2212) -> Option<PathBuf> {
2213 let dockerfile = PathBuf::from(dockerfile);
2214 if dockerfile.is_absolute() {
2215 return Some(dockerfile);
2216 }
2217 let compose_dir = compose_file.parent()?;
2218 let context_dir = match context {
2219 Some(ctx) => {
2220 let ctx = PathBuf::from(ctx);
2221 if ctx.is_absolute() {
2222 ctx
2223 } else {
2224 normalize_path(&compose_dir.join(ctx))
2225 }
2226 }
2227 None => compose_dir.to_path_buf(),
2228 };
2229 Some(context_dir.join(dockerfile))
2230}
2231
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): presumably referenced by the generated Dockerfile.extended
/// COPY/RUN steps — confirm against the features build-info generation.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2235
/// Escapes regex special characters in a string by prefixing each with `\`.
fn escape_regex_chars(input: &str) -> String {
    // The metacharacter set recognized by this escaper.
    const SPECIALS: &str = ".*+?^${}()|[]\\";
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut escaped, c| {
            if SPECIALS.contains(c) {
                escaped.push('\\');
            }
            escaped.push(c);
            escaped
        })
}
2247
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // First drop any version suffix: a digest (`@sha256:...`) or a tag
    // (`:1`) appearing after the final path separator.
    let without_version = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // Then keep only the last path segment.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2271
2272/// Generates a shell command that looks up a user's passwd entry.
2273///
2274/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2275/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2276fn get_ent_passwd_shell_command(user: &str) -> String {
2277 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2278 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2279 format!(
2280 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2281 shell = escaped_for_shell,
2282 re = escaped_for_regex,
2283 )
2284}
2285
2286/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2287///
2288/// Features listed in the override come first (in the specified order), followed
2289/// by any remaining features sorted lexicographically by their full reference ID.
2290fn resolve_feature_order<'a>(
2291 features: &'a HashMap<String, FeatureOptions>,
2292 override_order: &Option<Vec<String>>,
2293) -> Vec<(&'a String, &'a FeatureOptions)> {
2294 if let Some(order) = override_order {
2295 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2296 for ordered_id in order {
2297 if let Some((key, options)) = features.get_key_value(ordered_id) {
2298 ordered.push((key, options));
2299 }
2300 }
2301 let mut remaining: Vec<_> = features
2302 .iter()
2303 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2304 .collect();
2305 remaining.sort_by_key(|(id, _)| id.as_str());
2306 ordered.extend(remaining);
2307 ordered
2308 } else {
2309 let mut entries: Vec<_> = features.iter().collect();
2310 entries.sort_by_key(|(id, _)| id.as_str());
2311 entries
2312 }
2313}
2314
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// The wrapper banners the feature, sources the builtin and per-feature env
/// files with auto-export enabled, then runs the feature's `install.sh`.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // NOTE(review): naming is inverted relative to the script labels below —
    // `escaped_id` holds the full reference (printed as "Id") and
    // `escaped_name` holds the short feature id (printed as "Feature").
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line so the banner output lines up.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2373
2374fn dockerfile_inject_alias(
2375 dockerfile_content: &str,
2376 alias: &str,
2377 build_target: Option<String>,
2378) -> String {
2379 match image_from_dockerfile(dockerfile_content.to_string(), &build_target) {
2380 Some(target) => format!(
2381 r#"{dockerfile_content}
2382FROM {target} AS {alias}"#
2383 ),
2384 None => dockerfile_content.to_string(),
2385 }
2386}
2387
/// Finds the image referenced by the effective `FROM` line of a Dockerfile.
///
/// With `target = None`, the last `FROM` line wins (the final build stage).
/// With a target, the last `FROM <image> AS <target>` (case-insensitive) line
/// is used. Returns the `<image>` token, or `None` when nothing matches.
///
/// Tokenization uses `split_whitespace` and skips `--platform=...`-style
/// flags: the previous `split(' ')` implementation produced empty tokens on
/// repeated spaces/tabs and returned the flag instead of the image for
/// `FROM --platform=... <image>`.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    // Tokenize a FROM line: whitespace-separated words, ignoring option flags.
    fn from_tokens(line: &str) -> Vec<&str> {
        line.split_whitespace()
            .filter(|token| !token.starts_with("--"))
            .collect()
    }

    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match target {
            Some(target) => {
                let parts = from_tokens(from_line);
                // `FROM <image> AS <name>` — match the stage name case-insensitively.
                parts.len() >= 3
                    && parts[parts.len() - 2].eq_ignore_ascii_case("as")
                    && parts[parts.len() - 1].eq_ignore_ascii_case(target)
            }
            None => true,
        })
        .and_then(|from_line| from_tokens(from_line).get(1).map(|s| s.to_string()))
}
2413
2414fn get_remote_user_from_config(
2415 docker_config: &DockerInspect,
2416 devcontainer: &DevContainerManifest,
2417) -> Result<String, DevContainerError> {
2418 if let DevContainer {
2419 remote_user: Some(user),
2420 ..
2421 } = &devcontainer.dev_container()
2422 {
2423 return Ok(user.clone());
2424 }
2425 if let Some(metadata) = &docker_config.config.labels.metadata {
2426 for metadatum in metadata {
2427 if let Some(remote_user) = metadatum.get("remoteUser") {
2428 if let Some(remote_user_str) = remote_user.as_str() {
2429 return Ok(remote_user_str.to_string());
2430 }
2431 }
2432 }
2433 }
2434 if let Some(image_user) = &docker_config.config.image_user {
2435 if !image_user.is_empty() {
2436 return Ok(image_user.to_string());
2437 }
2438 }
2439 Ok("root".to_string())
2440}
2441
2442// This should come from spec - see the docs
2443fn get_container_user_from_config(
2444 docker_config: &DockerInspect,
2445 devcontainer: &DevContainerManifest,
2446) -> Result<String, DevContainerError> {
2447 if let Some(user) = &devcontainer.dev_container().container_user {
2448 return Ok(user.to_string());
2449 }
2450 if let Some(metadata) = &docker_config.config.labels.metadata {
2451 for metadatum in metadata {
2452 if let Some(container_user) = metadatum.get("containerUser") {
2453 if let Some(container_user_str) = container_user.as_str() {
2454 return Ok(container_user_str.to_string());
2455 }
2456 }
2457 }
2458 }
2459 if let Some(image_user) = &docker_config.config.image_user {
2460 return Ok(image_user.to_string());
2461 }
2462
2463 Ok("root".to_string())
2464}
2465
2466#[cfg(test)]
2467mod test {
2468 use std::{
2469 collections::HashMap,
2470 ffi::OsStr,
2471 path::{Path, PathBuf},
2472 process::{ExitStatus, Output},
2473 sync::{Arc, Mutex},
2474 };
2475
2476 use async_trait::async_trait;
2477 use fs::{FakeFs, Fs};
2478 use gpui::{AppContext, TestAppContext};
2479 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2480 use project::{
2481 ProjectEnvironment,
2482 worktree_store::{WorktreeIdCounter, WorktreeStore},
2483 };
2484 use serde_json_lenient::Value;
2485 use util::{command::Command, paths::SanitizedPath};
2486
2487 #[cfg(not(target_os = "windows"))]
2488 use crate::docker::DockerComposeServicePort;
2489 use crate::{
2490 DevContainerConfig, DevContainerContext,
2491 command_json::CommandRunner,
2492 devcontainer_api::DevContainerError,
2493 devcontainer_json::MountDefinition,
2494 devcontainer_manifest::{
2495 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2496 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2497 image_from_dockerfile, resolve_compose_dockerfile,
2498 },
2499 docker::{
2500 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2501 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2502 DockerPs,
2503 },
2504 oci::TokenResponse,
2505 };
    /// Canonical fake project path used by every test in this module.
    #[cfg(not(target_os = "windows"))]
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
    // NOTE(review): the raw string keeps a literal double backslash after the
    // drive letter (`C:\\path`) — presumably intentional for the sanitized-path
    // handling under test; confirm.
    #[cfg(target_os = "windows")]
    const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2510
    /// Builds an in-memory GNU tar archive from `(path, contents)` pairs; used
    /// to fake downloaded OCI feature tarballs in tests.
    ///
    /// An empty `contents` string creates a directory entry instead of a file.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Directory entry: zero-length payload, explicit Directory type.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                // Regular file entry carrying `content` as its payload.
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finish the archive and return the raw bytes from the cursor.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2541
2542 fn test_project_filename() -> String {
2543 PathBuf::from(TEST_PROJECT_PATH)
2544 .file_name()
2545 .expect("is valid")
2546 .display()
2547 .to_string()
2548 }
2549
    /// Writes `devcontainer_contents` to the fake FS at the conventional
    /// `.devcontainer/devcontainer.json` location under the test project, and
    /// returns the default config (which points at that location).
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2562
    /// Bundle of the fakes backing a manifest under test, handed back so tests
    /// can inspect or record the calls the manifest makes.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Held to keep the client alive; not read directly by tests.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2569
2570 async fn init_default_devcontainer_manifest(
2571 cx: &mut TestAppContext,
2572 devcontainer_contents: &str,
2573 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2574 let fs = FakeFs::new(cx.executor());
2575 let http_client = fake_http_client();
2576 let command_runner = Arc::new(TestCommandRunner::new());
2577 let docker = Arc::new(FakeDocker::new());
2578 let environment = HashMap::new();
2579
2580 init_devcontainer_manifest(
2581 cx,
2582 fs,
2583 http_client,
2584 docker,
2585 command_runner,
2586 environment,
2587 devcontainer_contents,
2588 )
2589 .await
2590 }
2591
    /// Constructs a [`DevContainerManifest`] wired to the given fakes, plus a
    /// [`TestDependencies`] bundle holding clones of those fakes for later
    /// inspection by the test.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Seed the fake FS with the devcontainer.json under test.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Minimal worktree/project-environment pair required by DevContainerContext.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Clones handed back so the test can inspect the fakes afterwards.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2634
    /// An explicit `remoteUser` in devcontainer.json must win over the value
    /// advertised by the image's devcontainer metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
 // These are some internal comments
 "image": "image",
 "remoteUser": "root",
}
 "#,
        )
        .await
        .unwrap();

        // Image metadata advertises a different user ("vsCode")...
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        // ...but the explicit devcontainer.json value ("root") takes precedence.
        assert_eq!(remote_user, "root".to_string())
    }
2674
    /// With no `remoteUser` in devcontainer.json, the value from the image's
    /// devcontainer metadata label is used instead.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        // Only source of a user here is the metadata label.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2703
2704 #[test]
2705 fn should_extract_feature_id_from_references() {
2706 assert_eq!(
2707 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2708 "aws-cli"
2709 );
2710 assert_eq!(
2711 extract_feature_id("ghcr.io/devcontainers/features/go"),
2712 "go"
2713 );
2714 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2715 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2716 assert_eq!(
2717 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2718 "rust"
2719 );
2720 }
2721
2722 #[gpui::test]
2723 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2724 let mut metadata = HashMap::new();
2725 metadata.insert(
2726 "remoteUser".to_string(),
2727 serde_json_lenient::Value::String("vsCode".to_string()),
2728 );
2729
2730 let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
2731 cx,
2732 r#"{
2733 "name": "TODO"
2734 }"#,
2735 )
2736 .await
2737 .unwrap();
2738 let build_resources = DockerBuildResources {
2739 image: DockerInspect {
2740 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2741 config: DockerInspectConfig {
2742 labels: DockerConfigLabels { metadata: None },
2743 image_user: None,
2744 env: Vec::new(),
2745 },
2746 mounts: None,
2747 state: None,
2748 },
2749 additional_mounts: vec![],
2750 privileged: false,
2751 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2752 };
2753 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2754
2755 assert!(docker_run_command.is_ok());
2756 let docker_run_command = docker_run_command.expect("ok");
2757
2758 assert_eq!(docker_run_command.get_program(), "docker");
2759 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2760 .join(".devcontainer")
2761 .join("devcontainer.json");
2762 let expected_config_file_label = expected_config_file_label.display();
2763 assert_eq!(
2764 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2765 vec![
2766 OsStr::new("run"),
2767 OsStr::new("--sig-proxy=false"),
2768 OsStr::new("-d"),
2769 OsStr::new("--mount"),
2770 OsStr::new(&format!(
2771 "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
2772 )),
2773 OsStr::new("-l"),
2774 OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
2775 OsStr::new("-l"),
2776 OsStr::new(&format!(
2777 "devcontainer.config_file={expected_config_file_label}"
2778 )),
2779 OsStr::new("--entrypoint"),
2780 OsStr::new("/bin/sh"),
2781 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2782 OsStr::new("-c"),
2783 OsStr::new(
2784 "
2785 echo Container started
2786 trap \"exit 0\" 15
2787 exec \"$@\"
2788 while sleep 1 & wait $!; do :; done
2789 "
2790 .trim()
2791 ),
2792 OsStr::new("-"),
2793 ]
2794 )
2795 }
2796
    /// `find_primary_service` requires the devcontainer config to name a
    /// `service` AND that service to exist in the compose config; both failure
    /// modes must error, and the happy path must return the named service.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2856
    /// End-to-end check of non-remote variable substitution when the default
    /// workspace mount (`/workspaces/<basename>`) is in effect, including
    /// `${localEnv:...}` lookups against the provided environment map.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
 // These are some internal comments
 "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
 "name": "myDevContainer-${devcontainerId}",
 "remoteUser": "root",
 "remoteEnv": {
 "DEVCONTAINER_ID": "${devcontainerId}",
 "MYVAR2": "myvarothervalue",
 "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
 "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
 "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
 "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
 "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
 "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

 }
}
 "#;
        // Non-empty environment so the ${localEnv:...} lookups have values.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2970
    /// With an explicit `workspaceMount`/`workspaceFolder`, the container-side
    /// variables must resolve against the custom folder rather than the
    /// default `/workspaces/<basename>` mount.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
 // These are some external comments. serde_lenient should handle them
 {
 // These are some internal comments
 "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
 "name": "myDevContainer-${devcontainerId}",
 "remoteUser": "root",
 "remoteEnv": {
 "DEVCONTAINER_ID": "${devcontainerId}",
 "MYVAR2": "myvarothervalue",
 "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
 "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
 "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
 "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

 },
 "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
 "workspaceFolder": "/workspace/customfolder"
 }
 "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — basename of the explicit workspaceFolder
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename} — still the local project's basename
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder} — the explicit workspaceFolder verbatim
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            // We replace backslashes with forward slashes during variable replacement for JSON safety
            Some(&TEST_PROJECT_PATH.replace("\\", "/"))
        );
    }
3058
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    //
    // End-to-end test of the Dockerfile + features build path: a devcontainer.json
    // with `build.dockerfile`, two OCI features, mounts, ports, lifecycle commands
    // and customizations is driven through `build_and_run`, and every artifact the
    // build produces is checked byte-for-byte: the extended Dockerfile, the
    // UID-update Dockerfile, the generated feature install wrapper, the `docker run`
    // argument list, and the env passed to every `docker exec`.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
    /*---------------------------------------------------------------------------------------------
     *  Copyright (c) Microsoft Corporation. All rights reserved.
     *  Licensed under the MIT License. See License.txt in the project root for license information.
     *--------------------------------------------------------------------------------------------*/
    {
        "name": "cli-${devcontainerId}",
        // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
        "build": {
            "dockerfile": "Dockerfile",
            "args": {
                "VARIANT": "18-bookworm",
                "FOO": "bar",
            },
        },
        "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
        "workspaceFolder": "/workspace2",
        "mounts": [
            // Keep command history across instances
            "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
        ],

        "runArgs": [
            "--cap-add=SYS_PTRACE",
            "--sig-proxy=true",
        ],

        "forwardPorts": [
            8082,
            8083,
        ],
        "appPort": [
            8084,
            "8085:8086",
        ],

        "containerEnv": {
            "VARIABLE_VALUE": "value",
        },

        "initializeCommand": "touch IAM.md",

        "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

        "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

        "postCreateCommand": {
            "yarn": "yarn install",
            "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
        },

        "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

        "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

        "remoteUser": "node",

        "remoteEnv": {
            "PATH": "${containerEnv:PATH}:/some/other/path",
            "OTHER_ENV": "other_env_value"
        },

        "features": {
            "ghcr.io/devcontainers/features/docker-in-docker:2": {
                "moby": false,
            },
            "ghcr.io/devcontainers/features/go:1": {},
        },

        "customizations": {
            "vscode": {
                "extensions": [
                    "dbaeumer.vscode-eslint",
                    "GitHub.vscode-pull-request-github",
                ],
            },
            "zed": {
                "extensions": ["vue", "ruby"],
            },
            "codespaces": {
                "repositories": {
                    "devcontainers/features": {
                        "permissions": {
                            "contents": "write",
                            "workflows": "write",
                        },
                    },
                },
            },
        },
    }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Seed the fake fs with the Dockerfile that `build.dockerfile` points at.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
        "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Drive the whole build + container start pipeline.
        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the `zed` customization's extension list should surface.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The feature build must emit an extended Dockerfile: the user's Dockerfile
        // verbatim, followed by the generated feature-install stages (one RUN stanza
        // per feature, in declaration order: docker-in-docker_0, go_1).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Because updateRemoteUserUID applies (non-Windows), a second Dockerfile is
        // generated that rewrites the remote user's UID/GID to match the host, then
        // re-applies the feature-contributed ENV lines (docker-in-docker, go) and
        // the containerEnv entry.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Each feature gets a generated install wrapper; spot-check the go feature's
        // wrapper (its options are rendered into the banner, env files are sourced,
        // then the feature's own install.sh runs).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
	[ $? -eq 0 ] && exit
	echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // The `docker run` invocation must carry runArgs, the feature-injected
        // --privileged and dind volume, the workspace + custom mounts, devcontainer
        // labels, forwardPorts/appPort publications, and the dind entrypoint wrapper.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every exec (lifecycle commands etc.) should receive the resolved remoteEnv,
        // with ${containerEnv:PATH} expanded against the container's PATH.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3434
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    //
    // End-to-end test of the docker-compose path: a devcontainer.json referencing a
    // two-service compose file (app built from a Dockerfile, db from an image) plus
    // two features is driven through `build_and_run`. Asserts cover the generated
    // extended Dockerfile, the UID-update Dockerfile, and the two compose override
    // files (build override and runtime override) written alongside them.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Seed the fake fs with the compose file named by `dockerComposeFile`.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
        "#.trim().to_string(),
            )
            .await
            .unwrap();

        // And the Dockerfile the app service's `build` section points at.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Drive the compose build + up pipeline.
        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The feature build extends the service's Dockerfile with the generated
        // feature-install stages (aws-cli_0 then docker-in-docker_1, in declaration
        // order; remote user here is the image default, vscode).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateRemoteUserUID applies (non-Windows), so a UID-rewriting Dockerfile
        // is also generated; unlike the Dockerfile test, only the docker-in-docker
        // feature contributes an ENV here and there is no containerEnv.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The build override compose file must keep the service's original build
        // context rather than rewriting it relative to the override's location.
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The runtime override adds the dind entrypoint/volume and labels to "app",
        // and — because "app" uses network_mode: service:db — publishes all
        // forwarded ports on the "db" service instead.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3756
3757 #[test]
3758 fn test_resolve_compose_dockerfile() {
3759 let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3760
3761 // Bug case (#53473): context ".." with relative dockerfile
3762 assert_eq!(
3763 resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3764 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3765 );
3766
3767 // Compose path containing ".." (as docker_compose_manifest() produces)
3768 assert_eq!(
3769 resolve_compose_dockerfile(
3770 Path::new("/project/.devcontainer/../docker-compose.yml"),
3771 Some("."),
3772 "docker/Dockerfile",
3773 ),
3774 Some(PathBuf::from("/project/docker/Dockerfile")),
3775 );
3776
3777 // Absolute dockerfile returned as-is
3778 assert_eq!(
3779 resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3780 Some(PathBuf::from("/absolute/Dockerfile")),
3781 );
3782
3783 // Absolute context used directly
3784 assert_eq!(
3785 resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3786 Some(PathBuf::from("/abs/context/Dockerfile")),
3787 );
3788
3789 // No context defaults to compose file's directory
3790 assert_eq!(
3791 resolve_compose_dockerfile(compose, None, "Dockerfile"),
3792 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3793 );
3794 }
3795
3796 #[gpui::test]
3797 async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
3798 cx: &mut TestAppContext,
3799 ) {
3800 cx.executor().allow_parking();
3801 env_logger::try_init().ok();
3802 let given_devcontainer_contents = r#"
3803 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3804 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3805 {
3806 "features": {
3807 "ghcr.io/devcontainers/features/aws-cli:1": {},
3808 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3809 },
3810 "name": "Rust and PostgreSQL",
3811 "dockerComposeFile": "docker-compose.yml",
3812 "service": "app",
3813 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3814
3815 // Features to add to the dev container. More info: https://containers.dev/features.
3816 // "features": {},
3817
3818 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3819 "forwardPorts": [
3820 8083,
3821 "db:5432",
3822 "db:1234",
3823 ],
3824 "updateRemoteUserUID": false,
3825 "appPort": "8084",
3826
3827 // Use 'postCreateCommand' to run commands after the container is created.
3828 // "postCreateCommand": "rustc --version",
3829
3830 // Configure tool-specific properties.
3831 // "customizations": {},
3832
3833 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3834 // "remoteUser": "root"
3835 }
3836 "#;
3837 let (test_dependencies, mut devcontainer_manifest) =
3838 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3839 .await
3840 .unwrap();
3841
3842 test_dependencies
3843 .fs
3844 .atomic_write(
3845 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3846 r#"
3847version: '3.8'
3848
3849volumes:
3850postgres-data:
3851
3852services:
3853app:
3854 build:
3855 context: .
3856 dockerfile: Dockerfile
3857 env_file:
3858 # Ensure that the variables in .env match the same variables in devcontainer.json
3859 - .env
3860
3861 volumes:
3862 - ../..:/workspaces:cached
3863
3864 # Overrides default command so things don't shut down after the process ends.
3865 command: sleep infinity
3866
3867 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3868 network_mode: service:db
3869
3870 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3871 # (Adding the "ports" property to this file will not forward from a Codespace.)
3872
3873db:
3874 image: postgres:14.1
3875 restart: unless-stopped
3876 volumes:
3877 - postgres-data:/var/lib/postgresql/data
3878 env_file:
3879 # Ensure that the variables in .env match the same variables in devcontainer.json
3880 - .env
3881
3882 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3883 # (Adding the "ports" property to this file will not forward from a Codespace.)
3884 "#.trim().to_string(),
3885 )
3886 .await
3887 .unwrap();
3888
3889 test_dependencies.fs.atomic_write(
3890 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3891 r#"
3892FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3893
3894# Include lld linker to improve build times either by using environment variable
3895# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3896RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3897&& apt-get -y install clang lld \
3898&& apt-get autoremove -y && apt-get clean -y
3899 "#.trim().to_string()).await.unwrap();
3900
3901 devcontainer_manifest.parse_nonremote_vars().unwrap();
3902
3903 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3904
3905 let files = test_dependencies.fs.files();
3906 let feature_dockerfile = files
3907 .iter()
3908 .find(|f| {
3909 f.file_name()
3910 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3911 })
3912 .expect("to be found");
3913 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3914 assert_eq!(
3915 &feature_dockerfile,
3916 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3917
3918FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3919
3920# Include lld linker to improve build times either by using environment variable
3921# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3922RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3923&& apt-get -y install clang lld \
3924&& apt-get autoremove -y && apt-get clean -y
3925FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3926
3927FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3928USER root
3929COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3930RUN chmod -R 0755 /tmp/build-features/
3931
3932FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3933
3934USER root
3935
3936RUN mkdir -p /tmp/dev-container-features
3937COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3938
3939RUN \
3940echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3941echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3942
3943
3944RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
3945cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
3946&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3947&& cd /tmp/dev-container-features/aws-cli_0 \
3948&& chmod +x ./devcontainer-features-install.sh \
3949&& ./devcontainer-features-install.sh \
3950&& rm -rf /tmp/dev-container-features/aws-cli_0
3951
3952RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
3953cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
3954&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3955&& cd /tmp/dev-container-features/docker-in-docker_1 \
3956&& chmod +x ./devcontainer-features-install.sh \
3957&& ./devcontainer-features-install.sh \
3958&& rm -rf /tmp/dev-container-features/docker-in-docker_1
3959
3960
3961ARG _DEV_CONTAINERS_IMAGE_USER=root
3962USER $_DEV_CONTAINERS_IMAGE_USER
3963
3964# Ensure that /etc/profile does not clobber the existing path
3965RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3966
3967
3968ENV DOCKER_BUILDKIT=1
3969"#
3970 );
3971 }
3972
    // Builds a docker-compose based devcontainer (two OCI features) when the
    // container engine reports itself as podman, and asserts the exact text of
    // the generated Dockerfile.extended and updateUID.Dockerfile.
    //
    // NOTE(review): compared to the docker variant of this test, the expected
    // Dockerfile uses plain `COPY --from=dev_containers_feature_content_source`
    // stages instead of BuildKit `RUN --mount=type=bind`, and omits the
    // trailing `ENV DOCKER_BUILDKIT=1` — presumably because podman lacks the
    // required BuildKit support; confirm against the generator code.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // devcontainer.json fixture: compose-based config ("dockerComposeFile"
        // + "service") with two registry features. JSONC comments and trailing
        // commas are exercised deliberately.
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            // "forwardPorts": [5432],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        // Make the fake engine identify as podman — this is the only input
        // that differs from the docker-compose variant of this test.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // docker-compose.yml fixture referenced by the config above.
        // NOTE(review): the YAML indentation here is flatter than standard
        // compose (service/keys at column 0) — presumably tolerated by the
        // fake/parser in use; confirm it round-trips as intended.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
  context: .
  dockerfile: Dockerfile
env_file:
  # Ensure that the variables in .env match the same variables in devcontainer.json
  - .env

volumes:
  - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
  - postgres-data:/var/lib/postgresql/data
env_file:
  # Ensure that the variables in .env match the same variables in devcontainer.json
  - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Dockerfile fixture for the "app" service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Golden check 1: the feature-install Dockerfile written by the build.
        let files = test_dependencies.fs.files();

        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // Golden check 2: the UID-remap Dockerfile. The shell remaps the
        // remote user's UID/GID to the host's unless they already match or
        // the target UID is taken by another user.
        // NOTE(review): this file still ends with `ENV DOCKER_BUILDKIT=1`
        // even under podman — confirm that is intentional.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4199
    // End-to-end single-Dockerfile build: a devcontainer.json with a "build"
    // section (args + target), two features, lifecycle commands, and
    // `"updateRemoteUserUID": false`. Asserts the exact generated
    // Dockerfile.extended, the per-feature install wrapper script for the go
    // feature, that the container is started via `docker run` (not compose),
    // and that every recorded exec carries the expanded `remoteEnv`.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture exercising most single-Dockerfile options; JSONC comments
        // and trailing commas are intentional.
        let given_devcontainer_contents = r#"
        /*---------------------------------------------------------------------------------------------
         * Copyright (c) Microsoft Corporation. All rights reserved.
         * Licensed under the MIT License. See License.txt in the project root for license information.
         *--------------------------------------------------------------------------------------------*/
        {
            "name": "cli-${devcontainerId}",
            // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VARIANT": "18-bookworm",
                    "FOO": "bar",
                },
                "target": "development",
            },
            "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
            "workspaceFolder": "/workspace2",
            "mounts": [
                // Keep command history across instances
                "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
            ],

            "forwardPorts": [
                8082,
                8083,
            ],
            "appPort": "8084",
            "updateRemoteUserUID": false,

            "containerEnv": {
                "VARIABLE_VALUE": "value",
            },

            "initializeCommand": "touch IAM.md",

            "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

            "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

            "postCreateCommand": {
                "yarn": "yarn install",
                "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
            },

            "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

            "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

            "remoteUser": "node",

            "remoteEnv": {
                "PATH": "${containerEnv:PATH}:/some/other/path",
                "OTHER_ENV": "other_env_value"
            },

            "features": {
                "ghcr.io/devcontainers/features/docker-in-docker:2": {
                    "moby": false,
                },
                "ghcr.io/devcontainers/features/go:1": {},
            },

            "customizations": {
                "vscode": {
                    "extensions": [
                        "dbaeumer.vscode-eslint",
                        "GitHub.vscode-pull-request-github",
                    ],
                },
                "zed": {
                    "extensions": ["vue", "ruby"],
                },
                "codespaces": {
                    "repositories": {
                        "devcontainers/features": {
                            "permissions": {
                                "contents": "write",
                                "workflows": "write",
                            },
                        },
                    },
                },
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Multi-stage Dockerfile fixture; the "development" stage is selected
        // by the "target" build option above.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customizations block surfaces as extension ids.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // Golden check: user Dockerfile content first, then the generated
        // feature-installation stages. Docker engine here, so BuildKit
        // `RUN --mount=type=bind` is used and `ENV DOCKER_BUILDKIT=1` plus
        // the feature-contributed env (GOPATH/GOROOT/PATH, VARIABLE_VALUE)
        // are appended at the end.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Golden check: the per-feature install wrapper generated for the go
        // feature (options echoed, builtin + feature env sourced, install.sh
        // delegated to).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
	[ $? -eq 0 ] && exit
	echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : go'
echo 'Id            : ghcr.io/devcontainers/features/go:1'
echo 'Options       :'
echo '    GOLANGCILINTVERSION=latest
    VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // A single-Dockerfile config must start the container via `docker run`.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // Every exec carries the expanded remoteEnv: ${containerEnv:PATH} is
        // resolved against the fake's initial PATH ("/initial/path").
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4483
    // Plain prebuilt image (no "build" section, no features): the only
    // generated file asserted on is updateUID.Dockerfile, which remaps the
    // remote user's UID/GID inside the image to match the host user.
    // Unix-only — UID remapping does not apply on Windows hosts.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "image": "test_image:latest",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // The shell below: read the remote user's current UID/GID from
        // /etc/passwd, then (a) no-op if they already match, (b) bail if the
        // target UID belongs to another user, otherwise (c) move any group
        // occupying the target GID to a free GID, rewrite passwd/group, and
        // chown the home directory.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4558
4559 #[cfg(target_os = "windows")]
4560 #[gpui::test]
4561 async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4562 cx.executor().allow_parking();
4563 env_logger::try_init().ok();
4564 let given_devcontainer_contents = r#"
4565 {
4566 "name": "cli-${devcontainerId}",
4567 "image": "test_image:latest",
4568 }
4569 "#;
4570
4571 let (_, mut devcontainer_manifest) =
4572 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4573 .await
4574 .unwrap();
4575
4576 devcontainer_manifest.parse_nonremote_vars().unwrap();
4577
4578 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4579
4580 assert_eq!(
4581 devcontainer_up.remote_workspace_folder,
4582 "/workspaces/project"
4583 );
4584 }
4585
    // docker-compose config whose service uses a prebuilt image (no build
    // section, no features): the UID-remap Dockerfile must still be generated
    // for the service's image. Unix-only — UID remapping does not apply on
    // Windows hosts.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "dockerComposeFile": "docker-compose-plain.yml",
            "service": "app",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose fixture: a single image-based service, no build section.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
  app:
    image: test_image:latest
    command: sleep infinity
    volumes:
      - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Same UID/GID remap script asserted in the plain-image test: no-op
        // when ids already match, bail when the target UID is taken, else
        // rewrite passwd/group and chown the home directory.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4679
4680 #[gpui::test]
4681 async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4682 cx.executor().allow_parking();
4683 env_logger::try_init().ok();
4684 let given_devcontainer_contents = r#"
4685 {
4686 "name": "cli-${devcontainerId}",
4687 "build": {
4688 "dockerfile": "Dockerfile",
4689 "args": {
4690 "VERSION": "1.22",
4691 }
4692 },
4693 }
4694 "#;
4695
4696 let (test_dependencies, mut devcontainer_manifest) =
4697 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4698 .await
4699 .unwrap();
4700
4701 test_dependencies
4702 .fs
4703 .atomic_write(
4704 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4705 r#"
4706FROM dontgrabme as build_context
4707ARG VERSION=1.21
4708ARG REPOSITORY=mybuild
4709ARG REGISTRY=docker.io/stuff
4710
4711ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4712
4713FROM ${IMAGE} AS devcontainer
4714 "#
4715 .trim()
4716 .to_string(),
4717 )
4718 .await
4719 .unwrap();
4720
4721 devcontainer_manifest.parse_nonremote_vars().unwrap();
4722
4723 let dockerfile_contents = devcontainer_manifest
4724 .expanded_dockerfile_content()
4725 .await
4726 .unwrap();
4727 let base_image = image_from_dockerfile(
4728 dockerfile_contents,
4729 &devcontainer_manifest
4730 .dev_container()
4731 .build
4732 .as_ref()
4733 .and_then(|b| b.target.clone()),
4734 )
4735 .unwrap();
4736
4737 assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4738 }
4739
    // Counterpart to the test above: when `"target": "development"` is set in
    // the build section, base-image resolution must use the FROM of that
    // stage (${DEV_IMAGE} -> ":latest"), not the last stage in the file.
    #[gpui::test]
    async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "VERSION": "1.22",
                },
                "target": "development"
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Two candidate stages: "development" pins :latest, "production" uses
        // the VERSION-derived tag. The target selects "development".
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
FROM dontgrabme as build_context
ARG VERSION=1.21
ARG REPOSITORY=mybuild
ARG REGISTRY=docker.io/stuff

ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest

FROM ${DEV_IMAGE} AS development
FROM ${IMAGE} AS production
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let dockerfile_contents = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();
        let base_image = image_from_dockerfile(
            dockerfile_contents,
            &devcontainer_manifest
                .dev_container()
                .build
                .as_ref()
                .and_then(|b| b.target.clone()),
        )
        .unwrap();

        assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
    }
4802
    // Golden test for ARG expansion inside a Dockerfile:
    // - devcontainer build args ("JSON_ARG", "ELIXIR_VERSION") override the
    //   Dockerfile's own ARG defaults;
    // - forward references stay unexpanded (INVALID_FORWARD_REFERENCE keeps
    //   the literal ${OTP_VERSION});
    // - multiple ARGs on one line (FOO=foo BAR=bar) and JSON-looking values
    //   with embedded quotes/braces expand correctly;
    // - the FROM line is expanded from the accumulated ARG values.
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
            "#
            .trim()
        )
    }
4875
    // TODO(review): empty placeholder — add assertions for aliasing a Dockerfile
    // that already contains build-stage aliases, or remove.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
4878
    // TODO(review): empty placeholder — add assertions for aliasing a Dockerfile
    // with no pre-existing build-stage aliases, or remove.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
4881
    // TODO(review): empty placeholder — add assertions for aliasing when a build
    // target is specified, or remove.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
4884
    /// One captured invocation of `run_docker_exec` on the fake docker client,
    /// kept so tests can assert on what would have been executed.
    pub(crate) struct RecordedExecCommand {
        // Underscore-prefixed fields are captured but not currently asserted on.
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        // Environment that would be passed to the exec'd command.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4892
    /// In-memory stand-in for a Docker/Podman client used by these tests.
    pub(crate) struct FakeDocker {
        // Every `run_docker_exec` call is recorded here for later inspection.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as the podman CLI.
        podman: bool,
        // Feeds into `supports_compose_buildkit` together with `podman`.
        has_buildx: bool,
    }
4898
4899 impl FakeDocker {
4900 pub(crate) fn new() -> Self {
4901 Self {
4902 podman: false,
4903 has_buildx: true,
4904 exec_commands_recorded: Mutex::new(Vec::new()),
4905 }
4906 }
4907 #[cfg(not(target_os = "windows"))]
4908 fn set_podman(&mut self, podman: bool) {
4909 self.podman = podman;
4910 }
4911 }
4912
4913 #[async_trait]
4914 impl DockerClient for FakeDocker {
4915 async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
4916 if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
4917 return Ok(DockerInspect {
4918 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4919 .to_string(),
4920 config: DockerInspectConfig {
4921 labels: DockerConfigLabels {
4922 metadata: Some(vec![HashMap::from([(
4923 "remoteUser".to_string(),
4924 Value::String("node".to_string()),
4925 )])]),
4926 },
4927 env: Vec::new(),
4928 image_user: Some("root".to_string()),
4929 },
4930 mounts: None,
4931 state: None,
4932 });
4933 }
4934 if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
4935 return Ok(DockerInspect {
4936 id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
4937 .to_string(),
4938 config: DockerInspectConfig {
4939 labels: DockerConfigLabels {
4940 metadata: Some(vec![HashMap::from([(
4941 "remoteUser".to_string(),
4942 Value::String("vscode".to_string()),
4943 )])]),
4944 },
4945 image_user: Some("root".to_string()),
4946 env: Vec::new(),
4947 },
4948 mounts: None,
4949 state: None,
4950 });
4951 }
4952 if id.starts_with("cli_") {
4953 return Ok(DockerInspect {
4954 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4955 .to_string(),
4956 config: DockerInspectConfig {
4957 labels: DockerConfigLabels {
4958 metadata: Some(vec![HashMap::from([(
4959 "remoteUser".to_string(),
4960 Value::String("node".to_string()),
4961 )])]),
4962 },
4963 image_user: Some("root".to_string()),
4964 env: vec!["PATH=/initial/path".to_string()],
4965 },
4966 mounts: None,
4967 state: None,
4968 });
4969 }
4970 if id == "found_docker_ps" {
4971 return Ok(DockerInspect {
4972 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4973 .to_string(),
4974 config: DockerInspectConfig {
4975 labels: DockerConfigLabels {
4976 metadata: Some(vec![HashMap::from([(
4977 "remoteUser".to_string(),
4978 Value::String("node".to_string()),
4979 )])]),
4980 },
4981 image_user: Some("root".to_string()),
4982 env: vec!["PATH=/initial/path".to_string()],
4983 },
4984 mounts: Some(vec![DockerInspectMount {
4985 source: "/path/to/local/project".to_string(),
4986 destination: "/workspaces/project".to_string(),
4987 }]),
4988 state: None,
4989 });
4990 }
4991 if id.starts_with("rust_a-") {
4992 return Ok(DockerInspect {
4993 id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
4994 .to_string(),
4995 config: DockerInspectConfig {
4996 labels: DockerConfigLabels {
4997 metadata: Some(vec![HashMap::from([(
4998 "remoteUser".to_string(),
4999 Value::String("vscode".to_string()),
5000 )])]),
5001 },
5002 image_user: Some("root".to_string()),
5003 env: Vec::new(),
5004 },
5005 mounts: None,
5006 state: None,
5007 });
5008 }
5009 if id == "test_image:latest" {
5010 return Ok(DockerInspect {
5011 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5012 .to_string(),
5013 config: DockerInspectConfig {
5014 labels: DockerConfigLabels {
5015 metadata: Some(vec![HashMap::from([(
5016 "remoteUser".to_string(),
5017 Value::String("node".to_string()),
5018 )])]),
5019 },
5020 env: Vec::new(),
5021 image_user: Some("root".to_string()),
5022 },
5023 mounts: None,
5024 state: None,
5025 });
5026 }
5027
5028 Err(DevContainerError::DockerNotAvailable)
5029 }
5030 async fn get_docker_compose_config(
5031 &self,
5032 config_files: &Vec<PathBuf>,
5033 ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
5034 let project_path = PathBuf::from(TEST_PROJECT_PATH);
5035 if config_files.len() == 1
5036 && config_files.get(0)
5037 == Some(
5038 &project_path
5039 .join(".devcontainer")
5040 .join("docker-compose.yml"),
5041 )
5042 {
5043 return Ok(Some(DockerComposeConfig {
5044 name: None,
5045 services: HashMap::from([
5046 (
5047 "app".to_string(),
5048 DockerComposeService {
5049 build: Some(DockerComposeServiceBuild {
5050 context: Some(".".to_string()),
5051 dockerfile: Some("Dockerfile".to_string()),
5052 args: None,
5053 additional_contexts: None,
5054 target: None,
5055 }),
5056 volumes: vec![MountDefinition {
5057 source: Some("../..".to_string()),
5058 target: "/workspaces".to_string(),
5059 mount_type: Some("bind".to_string()),
5060 }],
5061 network_mode: Some("service:db".to_string()),
5062 ..Default::default()
5063 },
5064 ),
5065 (
5066 "db".to_string(),
5067 DockerComposeService {
5068 image: Some("postgres:14.1".to_string()),
5069 volumes: vec![MountDefinition {
5070 source: Some("postgres-data".to_string()),
5071 target: "/var/lib/postgresql/data".to_string(),
5072 mount_type: Some("volume".to_string()),
5073 }],
5074 env_file: Some(vec![".env".to_string()]),
5075 ..Default::default()
5076 },
5077 ),
5078 ]),
5079 volumes: HashMap::from([(
5080 "postgres-data".to_string(),
5081 DockerComposeVolume::default(),
5082 )]),
5083 }));
5084 }
5085 if config_files.len() == 1
5086 && config_files.get(0)
5087 == Some(&PathBuf::from(
5088 "/path/to/local/project/.devcontainer/docker-compose-context-parent.yml",
5089 ))
5090 {
5091 return Ok(Some(DockerComposeConfig {
5092 name: None,
5093 services: HashMap::from([(
5094 "app".to_string(),
5095 DockerComposeService {
5096 build: Some(DockerComposeServiceBuild {
5097 context: Some("..".to_string()),
5098 dockerfile: Some(".devcontainer/Dockerfile".to_string()),
5099 args: None,
5100 additional_contexts: None,
5101 target: None,
5102 }),
5103 ..Default::default()
5104 },
5105 )]),
5106 volumes: HashMap::new(),
5107 }));
5108 }
5109 if config_files.len() == 1
5110 && config_files.get(0)
5111 == Some(&PathBuf::from(
5112 "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
5113 ))
5114 {
5115 return Ok(Some(DockerComposeConfig {
5116 name: None,
5117 services: HashMap::from([(
5118 "app".to_string(),
5119 DockerComposeService {
5120 image: Some("test_image:latest".to_string()),
5121 command: vec!["sleep".to_string(), "infinity".to_string()],
5122 ..Default::default()
5123 },
5124 )]),
5125 ..Default::default()
5126 }));
5127 }
5128 Err(DevContainerError::DockerNotAvailable)
5129 }
5130 async fn docker_compose_build(
5131 &self,
5132 _config_files: &Vec<PathBuf>,
5133 _project_name: &str,
5134 ) -> Result<(), DevContainerError> {
5135 Ok(())
5136 }
5137 async fn run_docker_exec(
5138 &self,
5139 container_id: &str,
5140 remote_folder: &str,
5141 user: &str,
5142 env: &HashMap<String, String>,
5143 inner_command: Command,
5144 ) -> Result<(), DevContainerError> {
5145 let mut record = self
5146 .exec_commands_recorded
5147 .lock()
5148 .expect("should be available");
5149 record.push(RecordedExecCommand {
5150 _container_id: container_id.to_string(),
5151 _remote_folder: remote_folder.to_string(),
5152 _user: user.to_string(),
5153 env: env.clone(),
5154 _inner_command: inner_command,
5155 });
5156 Ok(())
5157 }
5158 async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
5159 Err(DevContainerError::DockerNotAvailable)
5160 }
5161 async fn find_process_by_filters(
5162 &self,
5163 _filters: Vec<String>,
5164 ) -> Result<Option<DockerPs>, DevContainerError> {
5165 Ok(Some(DockerPs {
5166 id: "found_docker_ps".to_string(),
5167 }))
5168 }
5169 fn supports_compose_buildkit(&self) -> bool {
5170 !self.podman && self.has_buildx
5171 }
5172 fn docker_cli(&self) -> String {
5173 if self.podman {
5174 "podman".to_string()
5175 } else {
5176 "docker".to_string()
5177 }
5178 }
5179 }
5180
    /// A program and its argument list as captured by `TestCommandRunner`.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5186
    /// Command runner that records every command instead of spawning it.
    pub(crate) struct TestCommandRunner {
        // Commands captured by `run_command`, in invocation order.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5190
5191 impl TestCommandRunner {
5192 fn new() -> Self {
5193 Self {
5194 commands_recorded: Mutex::new(Vec::new()),
5195 }
5196 }
5197
5198 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5199 let record = self.commands_recorded.lock().expect("poisoned");
5200 record
5201 .iter()
5202 .filter(|r| r.program == program)
5203 .map(|r| r.clone())
5204 .collect()
5205 }
5206 }
5207
5208 #[async_trait]
5209 impl CommandRunner for TestCommandRunner {
5210 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5211 let mut record = self.commands_recorded.lock().expect("poisoned");
5212
5213 record.push(TestCommand {
5214 program: command.get_program().display().to_string(),
5215 args: command
5216 .get_args()
5217 .map(|a| a.display().to_string())
5218 .collect(),
5219 });
5220
5221 Ok(Output {
5222 status: ExitStatus::default(),
5223 stdout: vec![],
5224 stderr: vec![],
5225 })
5226 }
5227 }
5228
5229 fn fake_http_client() -> Arc<dyn HttpClient> {
5230 FakeHttpClient::create(|request| async move {
5231 let (parts, _body) = request.into_parts();
5232 if parts.uri.path() == "/token" {
5233 let token_response = TokenResponse {
5234 token: "token".to_string(),
5235 };
5236 return Ok(http::Response::builder()
5237 .status(200)
5238 .body(http_client::AsyncBody::from(
5239 serde_json_lenient::to_string(&token_response).unwrap(),
5240 ))
5241 .unwrap());
5242 }
5243
5244 // OCI specific things
5245 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5246 let response = r#"
5247 {
5248 "schemaVersion": 2,
5249 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5250 "config": {
5251 "mediaType": "application/vnd.devcontainers",
5252 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5253 "size": 2
5254 },
5255 "layers": [
5256 {
5257 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5258 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5259 "size": 59392,
5260 "annotations": {
5261 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5262 }
5263 }
5264 ],
5265 "annotations": {
5266 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5267 "com.github.package.type": "devcontainer_feature"
5268 }
5269 }
5270 "#;
5271 return Ok(http::Response::builder()
5272 .status(200)
5273 .body(http_client::AsyncBody::from(response))
5274 .unwrap());
5275 }
5276
5277 if parts.uri.path()
5278 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5279 {
5280 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5285 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5286 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5287 ```
5288 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5289 ```
5290 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5291
5292
5293 ## OS Support
5294
5295 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5296
5297 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5298
5299 `bash` is required to execute the `install.sh` script."#),
5300 ("./README.md", r#"
5301 # Docker (Docker-in-Docker) (docker-in-docker)
5302
5303 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5304
5305 ## Example Usage
5306
5307 ```json
5308 "features": {
5309 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5310 }
5311 ```
5312
5313 ## Options
5314
5315 | Options Id | Description | Type | Default Value |
5316 |-----|-----|-----|-----|
5317 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5318 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5319 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5320 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5321 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5322 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5323 | installDockerBuildx | Install Docker Buildx | boolean | true |
5324 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5325 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5326
5327 ## Customizations
5328
5329 ### VS Code Extensions
5330
5331 - `ms-azuretools.vscode-containers`
5332
5333 ## Limitations
5334
5335 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5336 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5337 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5338 ```
5339 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5340 ```
5341 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5342
5343
5344 ## OS Support
5345
5346 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5347
5348 `bash` is required to execute the `install.sh` script.
5349
5350
5351 ---
5352
5353 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
5354 ("./devcontainer-feature.json", r#"
5355 {
5356 "id": "docker-in-docker",
5357 "version": "2.16.1",
5358 "name": "Docker (Docker-in-Docker)",
5359 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5360 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5361 "options": {
5362 "version": {
5363 "type": "string",
5364 "proposals": [
5365 "latest",
5366 "none",
5367 "20.10"
5368 ],
5369 "default": "latest",
5370 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5371 },
5372 "moby": {
5373 "type": "boolean",
5374 "default": true,
5375 "description": "Install OSS Moby build instead of Docker CE"
5376 },
5377 "mobyBuildxVersion": {
5378 "type": "string",
5379 "default": "latest",
5380 "description": "Install a specific version of moby-buildx when using Moby"
5381 },
5382 "dockerDashComposeVersion": {
5383 "type": "string",
5384 "enum": [
5385 "none",
5386 "v1",
5387 "v2"
5388 ],
5389 "default": "v2",
5390 "description": "Default version of Docker Compose (v1, v2 or none)"
5391 },
5392 "azureDnsAutoDetection": {
5393 "type": "boolean",
5394 "default": true,
5395 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5396 },
5397 "dockerDefaultAddressPool": {
5398 "type": "string",
5399 "default": "",
5400 "proposals": [],
5401 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5402 },
5403 "installDockerBuildx": {
5404 "type": "boolean",
5405 "default": true,
5406 "description": "Install Docker Buildx"
5407 },
5408 "installDockerComposeSwitch": {
5409 "type": "boolean",
5410 "default": false,
5411 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5412 },
5413 "disableIp6tables": {
5414 "type": "boolean",
5415 "default": false,
5416 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5417 }
5418 },
5419 "entrypoint": "/usr/local/share/docker-init.sh",
5420 "privileged": true,
5421 "containerEnv": {
5422 "DOCKER_BUILDKIT": "1"
5423 },
5424 "customizations": {
5425 "vscode": {
5426 "extensions": [
5427 "ms-azuretools.vscode-containers"
5428 ],
5429 "settings": {
5430 "github.copilot.chat.codeGeneration.instructions": [
5431 {
5432 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5433 }
5434 ]
5435 }
5436 }
5437 },
5438 "mounts": [
5439 {
5440 "source": "dind-var-lib-docker-${devcontainerId}",
5441 "target": "/var/lib/docker",
5442 "type": "volume"
5443 }
5444 ],
5445 "installsAfter": [
5446 "ghcr.io/devcontainers/features/common-utils"
5447 ]
5448 }"#),
5449 ("./install.sh", r#"
5450 #!/usr/bin/env bash
5451 #-------------------------------------------------------------------------------------------------------------
5452 # Copyright (c) Microsoft Corporation. All rights reserved.
5453 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5454 #-------------------------------------------------------------------------------------------------------------
5455 #
5456 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5457 # Maintainer: The Dev Container spec maintainers
5458
5459
5460 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5461 USE_MOBY="${MOBY:-"true"}"
5462 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5463 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5464 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5465 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5466 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5467 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5468 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5469 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5470 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5471 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5472 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5473 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5474
5475 # Default: Exit on any failure.
5476 set -e
5477
5478 # Clean up
5479 rm -rf /var/lib/apt/lists/*
5480
5481 # Setup STDERR.
5482 err() {
5483 echo "(!) $*" >&2
5484 }
5485
5486 if [ "$(id -u)" -ne 0 ]; then
5487 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5488 exit 1
5489 fi
5490
5491 ###################
5492 # Helper Functions
5493 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5494 ###################
5495
5496 # Determine the appropriate non-root user
5497 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5498 USERNAME=""
5499 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5500 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5501 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5502 USERNAME=${CURRENT_USER}
5503 break
5504 fi
5505 done
5506 if [ "${USERNAME}" = "" ]; then
5507 USERNAME=root
5508 fi
5509 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5510 USERNAME=root
5511 fi
5512
5513 # Package manager update function
5514 pkg_mgr_update() {
5515 case ${ADJUSTED_ID} in
5516 debian)
5517 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5518 echo "Running apt-get update..."
5519 apt-get update -y
5520 fi
5521 ;;
5522 rhel)
5523 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5524 cache_check_dir="/var/cache/yum"
5525 else
5526 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5527 fi
5528 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5529 echo "Running ${PKG_MGR_CMD} makecache ..."
5530 ${PKG_MGR_CMD} makecache
5531 fi
5532 ;;
5533 esac
5534 }
5535
5536 # Checks if packages are installed and installs them if not
5537 check_packages() {
5538 case ${ADJUSTED_ID} in
5539 debian)
5540 if ! dpkg -s "$@" > /dev/null 2>&1; then
5541 pkg_mgr_update
5542 apt-get -y install --no-install-recommends "$@"
5543 fi
5544 ;;
5545 rhel)
5546 if ! rpm -q "$@" > /dev/null 2>&1; then
5547 pkg_mgr_update
5548 ${PKG_MGR_CMD} -y install "$@"
5549 fi
5550 ;;
5551 esac
5552 }
5553
5554 # Figure out correct version of a three part version number is not passed
5555 find_version_from_git_tags() {
5556 local variable_name=$1
5557 local requested_version=${!variable_name}
5558 if [ "${requested_version}" = "none" ]; then return; fi
5559 local repository=$2
5560 local prefix=${3:-"tags/v"}
5561 local separator=${4:-"."}
5562 local last_part_optional=${5:-"false"}
5563 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5564 local escaped_separator=${separator//./\\.}
5565 local last_part
5566 if [ "${last_part_optional}" = "true" ]; then
5567 last_part="(${escaped_separator}[0-9]+)?"
5568 else
5569 last_part="${escaped_separator}[0-9]+"
5570 fi
5571 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5572 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5573 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5574 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5575 else
5576 set +e
5577 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5578 set -e
5579 fi
5580 fi
5581 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5582 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5583 exit 1
5584 fi
5585 echo "${variable_name}=${!variable_name}"
5586 }
5587
5588 # Use semver logic to decrement a version number then look for the closest match
5589 find_prev_version_from_git_tags() {
5590 local variable_name=$1
5591 local current_version=${!variable_name}
5592 local repository=$2
5593 # Normally a "v" is used before the version number, but support alternate cases
5594 local prefix=${3:-"tags/v"}
5595 # Some repositories use "_" instead of "." for version number part separation, support that
5596 local separator=${4:-"."}
5597 # Some tools release versions that omit the last digit (e.g. go)
5598 local last_part_optional=${5:-"false"}
5599 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5600 local version_suffix_regex=$6
5601 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5602 set +e
5603 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5604 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5605 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5606
5607 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5608 ((major=major-1))
5609 declare -g ${variable_name}="${major}"
5610 # Look for latest version from previous major release
5611 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5612 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5613 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5614 ((minor=minor-1))
5615 declare -g ${variable_name}="${major}.${minor}"
5616 # Look for latest version from previous minor release
5617 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5618 else
5619 ((breakfix=breakfix-1))
5620 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5621 declare -g ${variable_name}="${major}.${minor}"
5622 else
5623 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5624 fi
5625 fi
5626 set -e
5627 }
5628
# Function to fetch the version released prior to the latest version
# $1 (url)           - github.com project URL, used only for the tag-based fallback
# $2 (repo_url)      - GitHub API releases endpoint for the same project
#                      (see get_github_api_repo_url)
# $3 (variable_name) - NAME of the global variable currently holding the latest
#                      version; the previous version is written back into that
#                      variable with declare -g, and "name=value" is echoed.
get_previous_version() {
    local url=$1
    local repo_url=$2
    local variable_name=$3
    # Seed with the current value so the variable stays usable if lookups fail.
    prev_version=${!variable_name}

    output=$(curl -s "$repo_url");
    # A JSON object response is an API error payload ({"message": ...});
    # a JSON array is the normal releases listing.
    if echo "$output" | jq -e 'type == "object"' > /dev/null; then
        message=$(echo "$output" | jq -r '.message')

        if [[ $message == "API rate limit exceeded"* ]]; then
            echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
            echo -e "\nAttempting to find latest version using GitHub tags."
            # Rate limited: fall back to scanning git tags instead of the API.
            find_prev_version_from_git_tags prev_version "$url" "tags/v"
            declare -g ${variable_name}="${prev_version}"
        fi
        # NOTE(review): other API errors (e.g. 404) silently keep the old value.
    elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
        echo -e "\nAttempting to find latest version using GitHub Api."
        # .[1] = second-newest release, i.e. the one just before the latest.
        version=$(echo "$output" | jq -r '.[1].tag_name')
        declare -g ${variable_name}="${version#v}"
    fi
    echo "${variable_name}=${!variable_name}"
}
5653
# Translate a github.com project URL into its REST API releases endpoint,
# e.g. https://github.com/docker/compose -> https://api.github.com/repos/docker/compose/releases
get_github_api_repo_url() {
    local project_url="$1"
    local api_url="${project_url/https:\/\/github.com/https:\/\/api.github.com\/repos}"
    echo "${api_url}/releases"
}
5658
###########################################
# Start docker-in-docker installation
###########################################

# Ensure apt is in non-interactive to avoid prompts
export DEBIAN_FRONTEND=noninteractive

# Source /etc/os-release to get OS info
. /etc/os-release

# Determine adjusted ID and package manager.
# NOTE: ID_LIKE may contain several space-separated tokens (e.g. Linux Mint
# reports "ubuntu debian"), so match it with a substring glob rather than
# string equality — the same approach already used for the RHEL family below.
if [[ "${ID}" = "debian" || "${ID_LIKE}" = *"debian"* ]]; then
    ADJUSTED_ID="debian"
    PKG_MGR_CMD="apt-get"
    # Use dpkg for Debian-based systems
    architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
    ADJUSTED_ID="rhel"
    # Determine the appropriate package manager for RHEL-based systems
    for pkg_mgr in tdnf dnf microdnf yum; do
        if command -v "$pkg_mgr" >/dev/null 2>&1; then
            PKG_MGR_CMD="$pkg_mgr"
            break
        fi
    done

    if [ -z "${PKG_MGR_CMD}" ]; then
        err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
        exit 1
    fi

    architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
else
    err "Linux distro ${ID} not supported."
    exit 1
fi
5695
# Azure Linux publishes no codename, so synthesize one from the version id.
case "${ID}" in
    azurelinux) VERSION_CODENAME="azurelinux${VERSION_ID}" ;;
esac

# Moby packages were removed from Debian trixie; fail fast with guidance.
if [[ "${USE_MOBY}" = "true" && "${ID}" = "debian" && "${VERSION_CODENAME}" = "trixie" ]]; then
    err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
    err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
    exit 1
fi
5707
# Validate that the requested engine flavour (Moby vs Docker CE) can be
# installed on this distribution/version, reporting what will be set up.
if [ "${USE_MOBY}" = "true" ]; then
    case "${ADJUSTED_ID}" in
        debian)
            if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
                err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
                exit 1
            fi
            echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
            ;;
        rhel)
            if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
            else
                echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
            fi
            ;;
    esac
else
    case "${ADJUSTED_ID}" in
        debian)
            if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
                err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
                err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
                exit 1
            fi
            echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
            ;;
        rhel)
            echo "RHEL-based system (${ID}) detected - using Docker CE packages"
            ;;
    esac
fi
5737
# Install base dependencies: tools every install path below relies on, plus
# per-family extras.
base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
if [ "${ADJUSTED_ID}" = "debian" ]; then
    check_packages apt-transport-https $base_packages dirmngr
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
fi

# git is needed for tag-based version lookups; install only when absent.
command -v git >/dev/null 2>&1 || check_packages git

# Update CA certificates to ensure HTTPS connections work properly
# This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
# Only run for Debian-based systems (RHEL uses update-ca-trust instead)
if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
    update-ca-certificates
fi

# Swap to legacy iptables for compatibility (Debian only)
if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
    update-alternatives --set iptables /usr/sbin/iptables-legacy
    update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
5767
# Set up the necessary repositories
# Configures the package repositories (and GPG keys) for either the Moby
# packages (Microsoft repos) or Docker CE (download.docker.com), per distro
# family. Also records the engine/cli package names used by later steps.
if [ "${USE_MOBY}" = "true" ]; then
    # Name of open source engine/cli
    engine_package_name="moby-engine"
    cli_package_name="moby-cli"

    case ${ADJUSTED_ID} in
        debian)
            # Import key safely and import Microsoft apt repo
            {
                curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
            } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
            echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
            ;;
        rhel)
            echo "(*) ${ID} detected - checking for Moby packages..."

            # Check if moby packages are available in default repos
            if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                echo "(*) Using built-in ${ID} Moby packages"
            else
                case "${ID}" in
                    azurelinux)
                        # Azure Linux has no Moby packages at all — hard error.
                        echo "(*) Moby packages not found in Azure Linux repositories"
                        echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                        err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                        exit 1
                        ;;
                    mariner)
                        echo "(*) Adding Microsoft repository for CBL-Mariner..."
                        # Add Microsoft repository if packages aren't available locally
                        curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                        cat > /etc/yum.repos.d/microsoft.repo << EOF
[microsoft]
name=Microsoft Repository
baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
EOF
                        # Verify packages are available after adding repo
                        pkg_mgr_update
                        if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                            echo "(*) Moby packages not found in Microsoft repository either"
                            err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                            exit 1
                        fi
                        ;;
                    *)
                        err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                        exit 1
                        ;;
                esac
            fi
            ;;
    esac
else
    # Name of licensed engine/cli
    engine_package_name="docker-ce"
    cli_package_name="docker-ce-cli"
    case ${ADJUSTED_ID} in
        debian)
            curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
            ;;
        rhel)
            # Docker CE repository setup for RHEL-based systems
            # Helper: write the Docker CE stable yum repo definition manually.
            setup_docker_ce_repo() {
                curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
            }
            # Helper: best-effort install of extra runtime deps on Azure Linux/Mariner.
            install_azure_linux_deps() {
                echo "(*) Installing device-mapper libraries for Docker CE..."
                [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                echo "(*) Installing additional Docker CE dependencies..."
                ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                    echo "(*) Some optional dependencies could not be installed, continuing..."
                }
            }
            # Helper: register a minimal file context for /var/lib/docker when
            # SELinux is enforcing/permissive but container-selinux is absent.
            setup_selinux_context() {
                if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                    echo "(*) Creating minimal SELinux context for Docker compatibility..."
                    mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                    echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                fi
            }

            # Special handling for RHEL Docker CE installation
            case "${ID}" in
                azurelinux|mariner)
                    echo "(*) ${ID} detected"
                    echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                    echo "(*) Setting up Docker CE repository..."

                    setup_docker_ce_repo
                    install_azure_linux_deps

                    if [ "${USE_MOBY}" != "true" ]; then
                        echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                        echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                        setup_selinux_context
                    else
                        echo "(*) Using Moby - container-selinux not required"
                    fi
                    ;;
                *)
                    # Standard RHEL/CentOS/Fedora approach
                    if command -v dnf >/dev/null 2>&1; then
                        dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    elif command -v yum-config-manager >/dev/null 2>&1; then
                        yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    else
                        # Manual fallback
                        setup_docker_ce_repo
                    fi
                    ;;
            esac
            ;;
    esac
fi
5900
# Refresh the package index so the repositories configured above are visible.
if [ "${ADJUSTED_ID}" = "debian" ]; then
    apt-get update
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    pkg_mgr_update
fi
5910
# Soft version matching
# Resolves DOCKER_VERSION to concrete package version suffixes
# (engine_version_suffix / cli_version_suffix) understood by the package
# manager; empty suffixes mean "install whatever the repo considers latest".
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            # Escape regex metacharacters ('.' and '+') appearing in the requested version.
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
            # "=<version>" suffix form used by apt; first madison match wins.
            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            # A bare "=" means the grep matched nothing.
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
            if [ "${USE_MOBY}" = "true" ]; then
                available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            else
                available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            fi
            set -e
            if [ -n "${available_versions}" ]; then
                # rpm-style "-<version>" suffix; same version used for engine and cli.
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                # Unlike the debian branch this is non-fatal: fall back to latest.
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi
5955
# Version matching for moby-buildx
# Mirrors the soft version matching above, but for the moby-buildx package
# (only relevant when installing Moby). Produces buildx_version_suffix.
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Escape '.' and '+' so the requested version is matched literally.
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                # A bare "=" means no candidate version matched.
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    # Non-fatal: fall back to latest available.
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
5993
# Install Docker / Moby CLI if not already installed
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Install compose
                apt-mark hold docker-ce docker-ce-cli
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    # NOTE: the engine suffix belongs to docker-ce and the cli suffix
                    # to docker-ce-cli (previously swapped; harmless while both are
                    # derived from the same version, wrong if they ever diverge).
                    set +e # Don't exit on error for this section
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Get the repository baseurl
                        repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Get package names with versions (engine suffix -> docker-ce,
                        # cli suffix -> docker-ce-cli; previously assigned crosswise)
                        if [ -n "${cli_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                            docker_cli_version="${cli_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems
                    # (engine suffix -> docker-ce, cli suffix -> docker-ce-cli)
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
6124
echo "Finished installing docker / moby!"

# CLI plugin directory layout used by the compose/buildx steps below.
docker_home="/usr/libexec/docker"
cli_plugins_dir="${docker_home}/cli-plugins"

# fallback for docker-compose
# Invoked when downloading the selected compose_version fails: resolves the
# previous released version via get_previous_version (which rewrites the
# global compose_version) and retries the download. Relies on the globals
# compose_version, target_compose_arch and docker_compose_path set by the caller.
fallback_compose(){
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    get_previous_version "${url}" "${repo_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
}
6139
# If 'docker-compose' command is to be included
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the package-manager architecture onto the artifact-name architecture
    # used by docker/compose GitHub releases.
    case "${architecture}" in
        amd64|x86_64) target_compose_arch=x86_64 ;;
        arm64|aarch64) target_compose_arch=aarch64 ;;
        *)
            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        # fallback_compose already logs the failure message itself, so call it
        # directly on error (previously the same "(!) Failed to fetch..." line
        # was printed twice); this matches the compose-switch/buildx call sites.
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || fallback_compose "$docker_compose_url"

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a CLI plugin (docker compose).
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6195
# Fallback path for compose-switch: step back to the release prior to
# ${compose_switch_version} (global, rewritten by get_previous_version) and
# retry the download. Expects target_switch_arch to already be set.
fallback_compose-switch() {
    local project_url=$1
    local api_url
    api_url=$(get_github_api_repo_url "$project_url")
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    get_previous_version "$project_url" "$api_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
}
# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
    if type docker-compose > /dev/null 2>&1; then
        echo "(*) Installing compose-switch..."
        current_compose_path="$(command -v docker-compose)"
        target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
        compose_switch_version="latest"
        compose_switch_url="https://github.com/docker/compose-switch"

        # Map architecture for compose-switch downloads. This must happen
        # BEFORE any call to fallback_compose-switch, which interpolates
        # ${target_switch_arch} into its download URL (previously the mapping
        # ran after the first fallback call, yielding a malformed URL there).
        case "${architecture}" in
            amd64|x86_64) target_switch_arch=amd64 ;;
            arm64|aarch64) target_switch_arch=arm64 ;;
            *) target_switch_arch=${architecture} ;;
        esac

        # Try to get latest version, fallback to known stable version if GitHub API fails
        set +e
        find_version_from_git_tags compose_switch_version "$compose_switch_url"
        if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
            echo "(*) GitHub API rate limited or failed, using fallback method"
            fallback_compose-switch "$compose_switch_url"
        fi
        set -e

        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
        chmod +x /usr/local/bin/compose-switch
        # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
        # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
        mv "${current_compose_path}" "${target_compose_path}"
        update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
        update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
    else
        err "Skipping installation of compose-switch as docker compose is unavailable..."
    fi
fi
6239
# If the init script was installed by a previous run/layer there is nothing
# left to do — clean the apt cache and exit successfully.
if [ -f "/usr/local/share/docker-init.sh" ]; then
    echo "/usr/local/share/docker-init.sh already exists, so exiting."
    # Clean up
    rm -rf /var/lib/apt/lists/*
    exit 0
fi
echo "docker-init doesn't exist, adding..."

# Create the docker group if missing. grep reads /etc/group directly instead
# of the previous `cat /etc/group | grep` (useless use of cat); -q replaces
# the stdout redirect, and stderr stays suppressed.
if ! grep -q -e "^docker:" /etc/group 2>/dev/null; then
    groupadd -r docker
fi

# Let the target user talk to the docker daemon socket.
usermod -aG docker "${USERNAME}"
6254
# Fallback path for docker/buildx: step back one release (get_previous_version
# rewrites the global buildx_version), refresh the global buildx_file_name for
# the caller, and retry the download.
fallback_buildx() {
    local project_url=$1
    local api_url
    api_url=$(get_github_api_repo_url "$project_url")
    echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
    get_previous_version "$project_url" "$api_url" buildx_version
    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
    echo -e "\nAttempting to install v${buildx_version}"
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
}
6265
# Optionally install the docker buildx CLI plugin from GitHub releases.
if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
    buildx_version="latest"
    docker_buildx_url="https://github.com/docker/buildx"
    # Resolves "latest" to a concrete tag (writes the buildx_version global).
    find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
    echo "(*) Installing buildx ${buildx_version}..."

    # Map architecture for buildx downloads
    case "${architecture}" in
        amd64|x86_64) target_buildx_arch=amd64 ;;
        arm64|aarch64) target_buildx_arch=arm64 ;;
        *) target_buildx_arch=${architecture} ;;
    esac

    buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"

    cd /tmp
    # fallback_buildx steps back one release and retries when the download fails.
    wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"

    docker_home="/usr/libexec/docker"
    cli_plugins_dir="${docker_home}/cli-plugins"

    mkdir -p ${cli_plugins_dir}
    mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
    chmod +x ${cli_plugins_dir}/docker-buildx

    # Give the docker group read/write access; setgid on directories keeps
    # group ownership for files created later.
    chown -R "${USERNAME}:docker" "${docker_home}"
    chmod -R g+r+w "${docker_home}"
    find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
fi
6295
# Decide whether to pass --ip6tables=false to dockerd. Only applied when the
# user asked for it AND the requested Docker version is "latest" or major >= 27.
DOCKER_DEFAULT_IP6_TABLES=""
if [ "$DISABLE_IP6_TABLES" == true ]; then
    requested_version=""
    # checking whether the version requested either is in semver format or just a number denoting the major version
    # and, extracting the major version number out of the two scenarios
    semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
    # Quote the regex/version expansions: left unquoted, the pattern's glob
    # characters ([...], *, ?) were subject to pathname expansion.
    if echo "$DOCKER_VERSION" | grep -Eq "$semver_regex"; then
        requested_version=$(echo "$DOCKER_VERSION" | cut -d. -f1)
    elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
        requested_version=$DOCKER_VERSION
    fi
    if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
        DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
        echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
    fi
fi

# Ensure the destination directory for docker-init.sh exists.
if [ ! -d /usr/local/share ]; then
    mkdir -p /usr/local/share
fi
6316
# Write the header of the runtime entrypoint script. NOTE: this heredoc
# delimiter is UNQUOTED, so every ${...} reference is expanded NOW, at
# install time — the values chosen above are baked into
# /usr/local/share/docker-init.sh (the quoted 'EOF' heredoc that follows
# appends the parts that must expand at container runtime instead).
tee /usr/local/share/docker-init.sh > /dev/null \
<< EOF
#!/bin/sh
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------

set -e

AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
EOF
6331
6332 tee -a /usr/local/share/docker-init.sh > /dev/null \
6333 << 'EOF'
6334 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6335 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6336 find /run /var/run -iname 'docker*.pid' -delete || :
6337 find /run /var/run -iname 'container*.pid' -delete || :
6338
6339 # -- Start: dind wrapper script --
6340 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6341
6342 export container=docker
6343
6344 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6345 mount -t securityfs none /sys/kernel/security || {
6346 echo >&2 'Could not mount /sys/kernel/security.'
6347 echo >&2 'AppArmor detection and --privileged mode might break.'
6348 }
6349 fi
6350
6351 # Mount /tmp (conditionally)
6352 if ! mountpoint -q /tmp; then
6353 mount -t tmpfs none /tmp
6354 fi
6355
6356 set_cgroup_nesting()
6357 {
6358 # cgroup v2: enable nesting
6359 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6360 # move the processes from the root group to the /init group,
6361 # otherwise writing subtree_control fails with EBUSY.
6362 # An error during moving non-existent process (i.e., "cat") is ignored.
6363 mkdir -p /sys/fs/cgroup/init
6364 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6365 # enable controllers
6366 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6367 > /sys/fs/cgroup/cgroup.subtree_control
6368 fi
6369 }
6370
6371 # Set cgroup nesting, retrying if necessary
6372 retry_cgroup_nesting=0
6373
6374 until [ "${retry_cgroup_nesting}" -eq "5" ];
6375 do
6376 set +e
6377 set_cgroup_nesting
6378
6379 if [ $? -ne 0 ]; then
6380 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6381 else
6382 break
6383 fi
6384
6385 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6386 set -e
6387 done
6388
6389 # -- End: dind wrapper script --
6390
6391 # Handle DNS
6392 set +e
6393 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6394 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6395 then
6396 echo "Setting dockerd Azure DNS."
6397 CUSTOMDNS="--dns 168.63.129.16"
6398 else
6399 echo "Not setting dockerd DNS manually."
6400 CUSTOMDNS=""
6401 fi
6402 set -e
6403
6404 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6405 then
6406 DEFAULT_ADDRESS_POOL=""
6407 else
6408 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6409 fi
6410
6411 # Start docker/moby engine
6412 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6413 INNEREOF
6414 )"
6415
6416 sudo_if() {
6417 COMMAND="$*"
6418
6419 if [ "$(id -u)" -ne 0 ]; then
6420 sudo $COMMAND
6421 else
6422 $COMMAND
6423 fi
6424 }
6425
6426 retry_docker_start_count=0
6427 docker_ok="false"
6428
6429 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6430 do
6431 # Start using sudo if not invoked as root
6432 if [ "$(id -u)" -ne 0 ]; then
6433 sudo /bin/sh -c "${dockerd_start}"
6434 else
6435 eval "${dockerd_start}"
6436 fi
6437
6438 retry_count=0
6439 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6440 do
6441 sleep 1s
6442 set +e
6443 docker info > /dev/null 2>&1 && docker_ok="true"
6444 set -e
6445
6446 retry_count=`expr $retry_count + 1`
6447 done
6448
6449 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6450 echo "(*) Failed to start docker, retrying..."
6451 set +e
6452 sudo_if pkill dockerd
6453 sudo_if pkill containerd
6454 set -e
6455 fi
6456
6457 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6458 done
6459
6460 # Execute whatever commands were passed in (if any). This allows us
6461 # to set this script to ENTRYPOINT while still executing the default CMD.
6462 exec "$@"
6463 EOF
6464
6465 chmod +x /usr/local/share/docker-init.sh
6466 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6467
6468 # Clean up
6469 rm -rf /var/lib/apt/lists/*
6470
6471 echo 'docker-in-docker-debian script has completed!'"#),
6472 ]).await;
6473
6474 return Ok(http::Response::builder()
6475 .status(200)
6476 .body(AsyncBody::from(response))
6477 .unwrap());
6478 }
6479 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6480 let response = r#"
6481 {
6482 "schemaVersion": 2,
6483 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6484 "config": {
6485 "mediaType": "application/vnd.devcontainers",
6486 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6487 "size": 2
6488 },
6489 "layers": [
6490 {
6491 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6492 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6493 "size": 20992,
6494 "annotations": {
6495 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6496 }
6497 }
6498 ],
6499 "annotations": {
6500 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6501 "com.github.package.type": "devcontainer_feature"
6502 }
6503 }
6504 "#;
6505
6506 return Ok(http::Response::builder()
6507 .status(200)
6508 .body(http_client::AsyncBody::from(response))
6509 .unwrap());
6510 }
            // GET of the "go" feature layer blob. Serves a tarball built on the fly
            // from the entries below. NOTE(review): the manifest route above pins
            // this payload by sha256 digest and size, so every byte of these string
            // literals (including whitespace) must stay exactly as-is.
            if parts.uri.path()
                == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
            {
                // Entry 1: the feature metadata document; entry 2: its install script.
                let response = build_tarball(vec![
                    ("./devcontainer-feature.json", r#"
 {
    "id": "go",
    "version": "1.3.3",
    "name": "Go",
    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
    "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
    "options": {
        "version": {
            "type": "string",
            "proposals": [
                "latest",
                "none",
                "1.24",
                "1.23"
            ],
            "default": "latest",
            "description": "Select or enter a Go version to install"
        },
        "golangciLintVersion": {
            "type": "string",
            "default": "latest",
            "description": "Version of golangci-lint to install"
        }
    },
    "init": true,
    "customizations": {
        "vscode": {
            "extensions": [
                "golang.Go"
            ],
            "settings": {
                "github.copilot.chat.codeGeneration.instructions": [
                    {
                        "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
                    }
                ]
            }
        }
    },
    "containerEnv": {
        "GOROOT": "/usr/local/go",
        "GOPATH": "/go",
        "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
    },
    "capAdd": [
        "SYS_PTRACE"
    ],
    "securityOpt": [
        "seccomp=unconfined"
    ],
    "installsAfter": [
        "ghcr.io/devcontainers/features/common-utils"
    ]
 }
                    "#),
                    ("./install.sh", r#"
 #!/usr/bin/env bash
 #-------------------------------------------------------------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
 #-------------------------------------------------------------------------------------------------------------
 #
 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
 # Maintainer: The VS Code and Codespaces Teams

 TARGET_GO_VERSION="${VERSION:-"latest"}"
 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"

 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"

 # https://www.google.com/linuxrepositories/
 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"

 set -e

 if [ "$(id -u)" -ne 0 ]; then
     echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
     exit 1
 fi

 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
 . /etc/os-release
 # Get an adjusted ID independent of distro variants
 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
     ADJUSTED_ID="debian"
 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
     ADJUSTED_ID="rhel"
     if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
         VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
     else
         VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
     fi
 else
     echo "Linux distro ${ID} not supported."
     exit 1
 fi

 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
     # As of 1 July 2024, mirrorlist.centos.org no longer exists.
     # Update the repo files to reference vault.centos.org.
     sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
     sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
     sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
 fi

 # Setup INSTALL_CMD & PKG_MGR_CMD
 if type apt-get > /dev/null 2>&1; then
     PKG_MGR_CMD=apt-get
     INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
 elif type microdnf > /dev/null 2>&1; then
     PKG_MGR_CMD=microdnf
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
 elif type dnf > /dev/null 2>&1; then
     PKG_MGR_CMD=dnf
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
 else
     PKG_MGR_CMD=yum
     INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
 fi

 # Clean up
 clean_up() {
     case ${ADJUSTED_ID} in
         debian)
             rm -rf /var/lib/apt/lists/*
             ;;
         rhel)
             rm -rf /var/cache/dnf/* /var/cache/yum/*
             rm -rf /tmp/yum.log
             rm -rf ${GPG_INSTALL_PATH}
             ;;
     esac
 }
 clean_up


 # Figure out correct version of a three part version number is not passed
 find_version_from_git_tags() {
     local variable_name=$1
     local requested_version=${!variable_name}
     if [ "${requested_version}" = "none" ]; then return; fi
     local repository=$2
     local prefix=${3:-"tags/v"}
     local separator=${4:-"."}
     local last_part_optional=${5:-"false"}
     if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
         local escaped_separator=${separator//./\\.}
         local last_part
         if [ "${last_part_optional}" = "true" ]; then
             last_part="(${escaped_separator}[0-9]+)?"
         else
             last_part="${escaped_separator}[0-9]+"
         fi
         local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
         local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
         if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
             declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
         else
             set +e
             declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
             set -e
         fi
     fi
     if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
         echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
         exit 1
     fi
     echo "${variable_name}=${!variable_name}"
 }

 pkg_mgr_update() {
     case $ADJUSTED_ID in
         debian)
             if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
                 echo "Running apt-get update..."
                 ${PKG_MGR_CMD} update -y
             fi
             ;;
         rhel)
             if [ ${PKG_MGR_CMD} = "microdnf" ]; then
                 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
                     echo "Running ${PKG_MGR_CMD} makecache ..."
                     ${PKG_MGR_CMD} makecache
                 fi
             else
                 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
                     echo "Running ${PKG_MGR_CMD} check-update ..."
                     set +e
                     ${PKG_MGR_CMD} check-update
                     rc=$?
                     if [ $rc != 0 ] && [ $rc != 100 ]; then
                         exit 1
                     fi
                     set -e
                 fi
             fi
             ;;
     esac
 }

 # Checks if packages are installed and installs them if not
 check_packages() {
     case ${ADJUSTED_ID} in
         debian)
             if ! dpkg -s "$@" > /dev/null 2>&1; then
                 pkg_mgr_update
                 ${INSTALL_CMD} "$@"
             fi
             ;;
         rhel)
             if ! rpm -q "$@" > /dev/null 2>&1; then
                 pkg_mgr_update
                 ${INSTALL_CMD} "$@"
             fi
             ;;
     esac
 }

 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
 rm -f /etc/profile.d/00-restore-env.sh
 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
 chmod +x /etc/profile.d/00-restore-env.sh

 # Some distributions do not install awk by default (e.g. Mariner)
 if ! type awk >/dev/null 2>&1; then
     check_packages awk
 fi

 # Determine the appropriate non-root user
 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
     USERNAME=""
     POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
     for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
         if id -u ${CURRENT_USER} > /dev/null 2>&1; then
             USERNAME=${CURRENT_USER}
             break
         fi
     done
     if [ "${USERNAME}" = "" ]; then
         USERNAME=root
     fi
 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
     USERNAME=root
 fi

 export DEBIAN_FRONTEND=noninteractive

 check_packages ca-certificates gnupg2 tar gcc make pkg-config

 if [ $ADJUSTED_ID = "debian" ]; then
     check_packages g++ libc6-dev
 else
     check_packages gcc-c++ glibc-devel
 fi
 # Install curl, git, other dependencies if missing
 if ! type curl > /dev/null 2>&1; then
     check_packages curl
 fi
 if ! type git > /dev/null 2>&1; then
     check_packages git
 fi
 # Some systems, e.g. Mariner, still a few more packages
 if ! type as > /dev/null 2>&1; then
     check_packages binutils
 fi
 if ! [ -f /usr/include/linux/errno.h ]; then
     check_packages kernel-headers
 fi
 # Minimal RHEL install may need findutils installed
 if ! [ -f /usr/bin/find ]; then
     check_packages findutils
 fi

 # Get closest match for version number specified
 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"

 architecture="$(uname -m)"
 case $architecture in
     x86_64) architecture="amd64";;
     aarch64 | armv8*) architecture="arm64";;
     aarch32 | armv7* | armvhf*) architecture="armv6l";;
     i?86) architecture="386";;
     *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
 esac

 # Install Go
 umask 0002
 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
     groupadd -r golang
 fi
 usermod -a -G golang "${USERNAME}"
 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"

 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
     # Use a temporary location for gpg keys to avoid polluting image
     export GNUPGHOME="/tmp/tmp-gnupg"
     mkdir -p ${GNUPGHOME}
     chmod 700 ${GNUPGHOME}
     curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
     gpg -q --import /tmp/tmp-gnupg/golang_key
     echo "Downloading Go ${TARGET_GO_VERSION}..."
     set +e
     curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
     exit_code=$?
     set -e
     if [ "$exit_code" != "0" ]; then
         echo "(!) Download failed."
         # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
         set +e
         major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
         minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
         breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
         # Handle Go's odd version pattern where "0" releases omit the last part
         if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
             ((minor=minor-1))
             TARGET_GO_VERSION="${major}.${minor}"
             # Look for latest version from previous minor release
             find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
         else
             ((breakfix=breakfix-1))
             if [ "${breakfix}" = "0" ]; then
                 TARGET_GO_VERSION="${major}.${minor}"
             else
                 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
             fi
         fi
         set -e
         echo "Trying ${TARGET_GO_VERSION}..."
         curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
     fi
     curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
     gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
     echo "Extracting Go ${TARGET_GO_VERSION}..."
     tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
     rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
 else
     echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
 fi

 # Install Go tools that are isImportant && !replacedByGopls based on
 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
 GO_TOOLS="\
     golang.org/x/tools/gopls@latest \
     honnef.co/go/tools/cmd/staticcheck@latest \
     golang.org/x/lint/golint@latest \
     github.com/mgechev/revive@latest \
     github.com/go-delve/delve/cmd/dlv@latest \
     github.com/fatih/gomodifytags@latest \
     github.com/haya14busa/goplay/cmd/goplay@latest \
     github.com/cweill/gotests/gotests@latest \
     github.com/josharian/impl@latest"

 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
     echo "Installing common Go tools..."
     export PATH=${TARGET_GOROOT}/bin:${PATH}
     export GOPATH=/tmp/gotools
     export GOCACHE="${GOPATH}/cache"

     mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
     cd "${GOPATH}"

     # Use go get for versions of go under 1.16
     go_install_command=install
     if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
         export GO111MODULE=on
         go_install_command=get
         echo "Go version < 1.16, using go get."
     fi

     (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log

     # Move Go tools into path
     if [ -d "${GOPATH}/bin" ]; then
         mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
     fi

     # Install golangci-lint from precompiled binaries
     if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
         echo "Installing golangci-lint latest..."
         curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
             sh -s -- -b "${TARGET_GOPATH}/bin"
     else
         echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
         curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
             sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
     fi

     # Remove Go tools temp directory
     rm -rf "${GOPATH}"
 fi


 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s

 # Clean up
 clean_up

 echo "Done!"
                    "#),
                ])
                .await;
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // GET /v2/devcontainers/features/aws-cli/manifests/1 — serves the OCI
            // image manifest for the mock "aws-cli" dev container feature. The layer
            // digest/size must agree with the tarball served by the matching
            // /blobs/sha256:4e9b04… route; "dev.containers.metadata" mirrors the
            // devcontainer-feature.json carried inside that tarball.
            if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
                let response = r#"
                {
                    "schemaVersion": 2,
                    "mediaType": "application/vnd.oci.image.manifest.v1+json",
                    "config": {
                        "mediaType": "application/vnd.devcontainers",
                        "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                        "size": 2
                    },
                    "layers": [
                        {
                            "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                            "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                            "size": 19968,
                            "annotations": {
                                "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                            }
                        }
                    ],
                    "annotations": {
                        "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                        "com.github.package.type": "devcontainer_feature"
                    }
                }"#;
                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
            // GET of the "aws-cli" feature layer blob. Builds the tarball from the
            // entries below, including directory entries ("./scripts/",
            // "./scripts/vendor/") and vendored completer scripts. NOTE(review):
            // the manifest route above pins this payload by sha256 digest and size,
            // so every byte of these string literals must stay exactly as-is.
            if parts.uri.path()
                == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
            {
                let response = build_tarball(vec![
                    (
                        "./devcontainer-feature.json",
                        r#"
{
  "id": "aws-cli",
  "version": "1.1.3",
  "name": "AWS CLI",
  "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
  "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
  "options": {
    "version": {
      "type": "string",
      "proposals": [
        "latest"
      ],
      "default": "latest",
      "description": "Select or enter an AWS CLI version."
    },
    "verbose": {
      "type": "boolean",
      "default": true,
      "description": "Suppress verbose output."
    }
  },
  "customizations": {
    "vscode": {
      "extensions": [
        "AmazonWebServices.aws-toolkit-vscode"
      ],
      "settings": {
        "github.copilot.chat.codeGeneration.instructions": [
          {
            "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
          }
        ]
      }
    }
  },
  "installsAfter": [
    "ghcr.io/devcontainers/features/common-utils"
  ]
}
            "#,
                    ),
                    (
                        "./install.sh",
                        r#"#!/usr/bin/env bash
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
# Maintainer: The VS Code and Codespaces Teams

set -e

# Clean up
rm -rf /var/lib/apt/lists/*

VERSION=${VERSION:-"latest"}
VERBOSE=${VERBOSE:-"true"}

AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
YLZATHZKTJyiqA==
=vYOk
-----END PGP PUBLIC KEY BLOCK-----"

if [ "$(id -u)" -ne 0 ]; then
    echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
    exit 1
fi

apt_get_update()
{
    if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
        echo "Running apt-get update..."
        apt-get update -y
    fi
}

# Checks if packages are installed and installs them if not
check_packages() {
    if ! dpkg -s "$@" > /dev/null 2>&1; then
        apt_get_update
        apt-get -y install --no-install-recommends "$@"
    fi
}

export DEBIAN_FRONTEND=noninteractive

check_packages curl ca-certificates gpg dirmngr unzip bash-completion less

verify_aws_cli_gpg_signature() {
    local filePath=$1
    local sigFilePath=$2
    local awsGpgKeyring=aws-cli-public-key.gpg

    echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
    gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
    local status=$?

    rm "./${awsGpgKeyring}"

    return ${status}
}

install() {
    local scriptZipFile=awscli.zip
    local scriptSigFile=awscli.sig

    # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
    if [ "${VERSION}" != "latest" ]; then
        local versionStr=-${VERSION}
    fi
    architecture=$(dpkg --print-architecture)
    case "${architecture}" in
        amd64) architectureStr=x86_64 ;;
        arm64) architectureStr=aarch64 ;;
        *)
            echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac
    local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
    curl "${scriptUrl}" -o "${scriptZipFile}"
    curl "${scriptUrl}.sig" -o "${scriptSigFile}"

    verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
    if (( $? > 0 )); then
        echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
        exit 1
    fi

    if [ "${VERBOSE}" = "false" ]; then
        unzip -q "${scriptZipFile}"
    else
        unzip "${scriptZipFile}"
    fi

    ./aws/install

    # kubectl bash completion
    mkdir -p /etc/bash_completion.d
    cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws

    # kubectl zsh completion
    if [ -e "${USERHOME}/.oh-my-zsh" ]; then
        mkdir -p "${USERHOME}/.oh-my-zsh/completions"
        cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
        chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
    fi

    rm -rf ./aws
}

echo "(*) Installing AWS CLI..."

install

# Clean up
rm -rf /var/lib/apt/lists/*

echo "Done!""#,
                    ),
                    ("./scripts/", r#""#),
                    (
                        "./scripts/fetch-latest-completer-scripts.sh",
                        r#"
#!/bin/bash
#-------------------------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
#-------------------------------------------------------------------------------------------------------------
#
# Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
# Maintainer: The Dev Container spec maintainers
#
# Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
#
COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"

wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
chmod +x "$BASH_COMPLETER_SCRIPT"

wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
chmod +x "$ZSH_COMPLETER_SCRIPT"
            "#,
                    ),
                    ("./scripts/vendor/", r#""#),
                    (
                        "./scripts/vendor/aws_bash_completer",
                        r#"
# Typically that would be added under one of the following paths:
# - /etc/bash_completion.d
# - /usr/local/etc/bash_completion.d
# - /usr/share/bash-completion/completions

complete -C aws_completer aws
            "#,
                    ),
                    (
                        "./scripts/vendor/aws_zsh_completer.sh",
                        r#"
# Source this file to activate auto completion for zsh using the bash
# compatibility helper. Make sure to run `compinit` before, which should be
# given usually.
#
# % source /path/to/zsh_complete.sh
#
# Typically that would be called somewhere in your .zshrc.
#
# Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
# That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
#
# https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
#
# zsh releases prior to that version do not export the required env variables!

autoload -Uz bashcompinit
bashcompinit -i

_bash_complete() {
  local ret=1
  local -a suf matches
  local -x COMP_POINT COMP_CWORD
  local -a COMP_WORDS COMPREPLY BASH_VERSINFO
  local -x COMP_LINE="$words"
  local -A savejobstates savejobtexts

  (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
  (( COMP_CWORD = CURRENT - 1))
  COMP_WORDS=( $words )
  BASH_VERSINFO=( 2 05b 0 1 release )

  savejobstates=( ${(kv)jobstates} )
  savejobtexts=( ${(kv)jobtexts} )

  [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )

  matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )

  if [[ -n $matches ]]; then
    if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
      compset -P '*/' && matches=( ${matches##*/} )
      compset -S '/*' && matches=( ${matches%%/*} )
      compadd -Q -f "${suf[@]}" -a matches && ret=0
    else
      compadd -Q "${suf[@]}" -a matches && ret=0
    fi
  fi

  if (( ret )); then
    if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
      _default "${suf[@]}" && ret=0
    elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
      _directories "${suf[@]}" && ret=0
    fi
  fi

  return ret
}

complete -C aws_completer aws
            "#,
                    ),
                ]).await;

                return Ok(http::Response::builder()
                    .status(200)
                    .body(AsyncBody::from(response))
                    .unwrap());
            }
7263
            // No mock route matched the request path: answer 404 with an empty body.
            Ok(http::Response::builder()
                .status(404)
                .body(http_client::AsyncBody::default())
                .unwrap())
7268 })
7269 }
7270}