1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use fs::Fs;
10use http_client::HttpClient;
11use util::{ResultExt, command::Command};
12
13use crate::{
14 DevContainerConfig, DevContainerContext,
15 command_json::{CommandRunner, DefaultCommandRunner},
16 devcontainer_api::{DevContainerError, DevContainerUp},
17 devcontainer_json::{
18 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
19 deserialize_devcontainer_json,
20 },
21 docker::{
22 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
23 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
24 get_remote_dir_from_config,
25 },
26 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
27 get_oci_token,
28 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
29 safe_id_lower,
30};
31
/// Tracks how far the raw devcontainer.json has been processed.
///
/// Variable substitution (`${devcontainerId}`, `${localEnv:...}`, workspace
/// folders, ...) happens after the initial deserialization, so several
/// operations guard on which state the config is in.
enum ConfigStatus {
    /// Parsed straight from JSON; substitution variables are still literal.
    Deserialized(DevContainer),
    /// Re-parsed after non-remote variable expansion has been applied.
    VariableParsed(DevContainer),
}
36
/// The docker-compose files in effect for a dev container together with the
/// merged configuration docker resolved from them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Full paths of every compose file, in the order they are passed to docker.
    files: Vec<PathBuf>,
    // The merged compose configuration parsed from those files.
    config: DockerComposeConfig,
}
42
/// Aggregates everything needed to build and run a dev container for one
/// local project: parsed configuration, IO/docker handles, and the build
/// artifacts (downloaded features, resolved images) produced along the way.
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // The devcontainer.json text as read from disk, before variable expansion.
    raw_config: String,
    // Current parse state; see `ConfigStatus`.
    config: ConfigStatus,
    // Host environment, consulted for `${localEnv:...}` substitutions.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer.json file.
    config_directory: PathBuf,
    // File name of the devcontainer.json within `config_directory`.
    file_name: String,
    // Inspect data for the resolved base image; set by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths/tags for the generated features build; set by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Manifests of the OCI features downloaded for this container.
    features: Vec<FeatureManifest>,
}
// Fallback prefix for the project directory inside the container
// (not referenced in this chunk — presumably used by workspace-folder
// resolution elsewhere in the module).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
59impl DevContainerManifest {
    /// Creates a manifest by reading and deserializing the devcontainer.json
    /// referenced by `local_config`, relative to `local_project_path`.
    ///
    /// The resulting config starts in the `Deserialized` state; variable
    /// expansion has not yet been applied.
    ///
    /// # Errors
    /// - `DevContainerParseFailed` if the file cannot be read or parsed, or
    ///   its name is missing/invalid unicode.
    /// - `NotInValidProject` if the config file has no parent directory.
    async fn new(
        context: &DevContainerContext,
        environment: HashMap<String, String>,
        docker_client: Arc<dyn DockerClient>,
        command_runner: Arc<dyn CommandRunner>,
        local_config: DevContainerConfig,
        local_project_path: &Path,
    ) -> Result<Self, DevContainerError> {
        let config_path = local_project_path.join(local_config.config_path.clone());
        log::debug!("parsing devcontainer json found in {:?}", &config_path);
        let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
            log::error!("Unable to read devcontainer contents: {e}");
            DevContainerError::DevContainerParseFailed
        })?;

        let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;

        let devcontainer_directory = config_path.parent().ok_or_else(|| {
            log::error!("Dev container file should be in a directory");
            DevContainerError::NotInValidProject
        })?;
        let file_name = config_path
            .file_name()
            .and_then(|f| f.to_str())
            .ok_or_else(|| {
                log::error!("Dev container file has no file name, or is invalid unicode");
                DevContainerError::DevContainerParseFailed
            })?;

        Ok(Self {
            fs: context.fs.clone(),
            http_client: context.http_client.clone(),
            docker_client,
            command_runner,
            // Keep the raw text so variables can be expanded against it later.
            raw_config: devcontainer_contents,
            config: ConfigStatus::Deserialized(devcontainer),
            local_project_directory: local_project_path.to_path_buf(),
            local_environment: environment,
            config_directory: devcontainer_directory.to_path_buf(),
            file_name: file_name.to_string(),
            root_image: None,
            features_build_info: None,
            features: Vec::new(),
        })
    }
105
106 fn devcontainer_id(&self) -> String {
107 let mut labels = self.identifying_labels();
108 labels.sort_by_key(|(key, _)| *key);
109
110 let mut hasher = DefaultHasher::new();
111 for (key, value) in &labels {
112 key.hash(&mut hasher);
113 value.hash(&mut hasher);
114 }
115
116 format!("{:016x}", hasher.finish())
117 }
118
119 fn identifying_labels(&self) -> Vec<(&str, String)> {
120 let labels = vec![
121 (
122 "devcontainer.local_folder",
123 (self.local_project_directory.display()).to_string(),
124 ),
125 (
126 "devcontainer.config_file",
127 (self.config_file().display()).to_string(),
128 ),
129 ];
130 labels
131 }
132
133 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
134 let mut replaced_content = content
135 .replace("${devcontainerId}", &self.devcontainer_id())
136 .replace(
137 "${containerWorkspaceFolderBasename}",
138 &self.remote_workspace_base_name().unwrap_or_default(),
139 )
140 .replace(
141 "${localWorkspaceFolderBasename}",
142 &self.local_workspace_base_name()?,
143 )
144 .replace(
145 "${containerWorkspaceFolder}",
146 &self
147 .remote_workspace_folder()
148 .map(|path| path.display().to_string())
149 .unwrap_or_default()
150 .replace('\\', "/"),
151 )
152 .replace(
153 "${localWorkspaceFolder}",
154 &self.local_workspace_folder().replace('\\', "/"),
155 );
156 for (k, v) in &self.local_environment {
157 let find = format!("${{localEnv:{k}}}");
158 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
159 }
160
161 Ok(replaced_content)
162 }
163
164 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
165 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
166 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
167
168 self.config = ConfigStatus::VariableParsed(parsed_config);
169
170 Ok(())
171 }
172
173 fn runtime_remote_env(
174 &self,
175 container_env: &HashMap<String, String>,
176 ) -> Result<HashMap<String, String>, DevContainerError> {
177 let mut merged_remote_env = container_env.clone();
178 // HOME is user-specific, and we will often not run as the image user
179 merged_remote_env.remove("HOME");
180 if let Some(remote_env) = self.dev_container().remote_env.clone() {
181 let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
182 log::error!(
183 "Unexpected error serializing dev container remote_env: {e} - {:?}",
184 remote_env
185 );
186 DevContainerError::DevContainerParseFailed
187 })?;
188 for (k, v) in container_env {
189 raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
190 }
191 let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
192 .map_err(|e| {
193 log::error!(
194 "Unexpected error reserializing dev container remote env: {e} - {:?}",
195 &raw
196 );
197 DevContainerError::DevContainerParseFailed
198 })?;
199 for (k, v) in reserialized {
200 merged_remote_env.insert(k, v);
201 }
202 }
203 Ok(merged_remote_env)
204 }
205
    /// Full path of the devcontainer.json file for this manifest.
    fn config_file(&self) -> PathBuf {
        self.config_directory.join(&self.file_name)
    }
209
210 fn dev_container(&self) -> &DevContainer {
211 match &self.config {
212 ConfigStatus::Deserialized(dev_container) => dev_container,
213 ConfigStatus::VariableParsed(dev_container) => dev_container,
214 }
215 }
216
217 async fn dockerfile_location(&self) -> Option<PathBuf> {
218 let dev_container = self.dev_container();
219 match dev_container.build_type() {
220 DevContainerBuildType::Image => None,
221 DevContainerBuildType::Dockerfile => dev_container
222 .build
223 .as_ref()
224 .map(|build| self.config_directory.join(&build.dockerfile)),
225 DevContainerBuildType::DockerCompose => {
226 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
227 return None;
228 };
229 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
230 else {
231 return None;
232 };
233 main_service
234 .build
235 .and_then(|b| b.dockerfile)
236 .map(|dockerfile| self.config_directory.join(dockerfile))
237 }
238 DevContainerBuildType::None => None,
239 }
240 }
241
242 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
243 let mut hasher = DefaultHasher::new();
244 let prefix = match &self.dev_container().name {
245 Some(name) => &safe_id_lower(name),
246 None => "zed-dc",
247 };
248 let prefix = prefix.get(..6).unwrap_or(prefix);
249
250 dockerfile_build_path.hash(&mut hasher);
251
252 let hash = hasher.finish();
253 format!("{}-{:x}-features", prefix, hash)
254 }
255
256 /// Gets the base image from the devcontainer with the following precedence:
257 /// - The devcontainer image if an image is specified
258 /// - The image sourced in the Dockerfile if a Dockerfile is specified
259 /// - The image sourced in the docker-compose main service, if one is specified
260 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
261 /// If no such image is available, return an error
262 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
263 if let Some(image) = &self.dev_container().image {
264 return Ok(image.to_string());
265 }
266 if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
267 let dockerfile_contents = self
268 .fs
269 .load(&self.config_directory.join(dockerfile))
270 .await
271 .map_err(|e| {
272 log::error!("Error reading dockerfile: {e}");
273 DevContainerError::DevContainerParseFailed
274 })?;
275 return image_from_dockerfile(self, dockerfile_contents);
276 }
277 if self.dev_container().docker_compose_file.is_some() {
278 let docker_compose_manifest = self.docker_compose_manifest().await?;
279 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
280
281 if let Some(dockerfile) = main_service
282 .build
283 .as_ref()
284 .and_then(|b| b.dockerfile.as_ref())
285 {
286 let dockerfile_contents = self
287 .fs
288 .load(&self.config_directory.join(dockerfile))
289 .await
290 .map_err(|e| {
291 log::error!("Error reading dockerfile: {e}");
292 DevContainerError::DevContainerParseFailed
293 })?;
294 return image_from_dockerfile(self, dockerfile_contents);
295 }
296 if let Some(image) = &main_service.image {
297 return Ok(image.to_string());
298 }
299
300 log::error!("No valid base image found in docker-compose configuration");
301 return Err(DevContainerError::DevContainerParseFailed);
302 }
303 log::error!("No valid base image found in dev container configuration");
304 Err(DevContainerError::DevContainerParseFailed)
305 }
306
    /// Downloads OCI feature content and generates the extended Dockerfile
    /// (`Dockerfile.extended`) that layers those features onto the base image.
    ///
    /// On success, populates `self.root_image`, `self.features`, and
    /// `self.features_build_info`. Requires variable expansion
    /// (`parse_nonremote_vars`) to have run first.
    ///
    /// # Errors
    /// `DevContainerParseFailed` for config problems, `ResourceFetchFailed`
    /// for OCI registry failures, `FilesystemError` for local IO failures.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the image the features will be layered onto.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // --- Phase 1: Lay out temp directories used as docker build inputs ---

        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        // Millisecond timestamp keeps successive builds from sharing a directory.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // Used as an (intentionally empty) build context when none is configured.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // --- Phase 2: Download each feature's OCI layer and stage its files ---

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Well-known variables that feature install scripts read.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor overrideFeatureInstallOrder when present.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature whose options are literally `false` is explicitly disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Directory name "<id>_<index>" keeps repeated features from colliding.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI registry references are supported here (no local paths).
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // The feature's content tarball is the manifest's first layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain substitution variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Materialize the feature's option values as an env file, then wrap
            // its install script so those values are in scope at build time.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        // Only compose builds need the capability check; plain docker builds
        // always take the buildkit path here.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Best-effort: a missing/unreadable base Dockerfile just yields None.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
542
    /// Renders the extended Dockerfile that stacks feature install layers on
    /// top of the (optionally user-provided) base Dockerfile.
    ///
    /// `dockerfile_content` is the original Dockerfile text, if any;
    /// `use_buildkit` switches between a BuildKit named build-context and a
    /// plain image stage for sourcing the feature content.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // updateRemoteUserUID defaults to enabled, but is forced off on Windows.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile snippet per feature, concatenated in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's passwd entry (for $HOME below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Make sure the user Dockerfile's final stage has an alias we can FROM.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit there are no named build contexts, so the feature
        // content is sourced from a regular image stage instead.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    // NOTE(review): no separator is inserted before `ENV`; this
                    // relies on the preceding content ending in a newline, which
                    // may not hold after a feature env layer — confirm.
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
644
645 fn build_merged_resources(
646 &self,
647 base_image: DockerInspect,
648 ) -> Result<DockerBuildResources, DevContainerError> {
649 let dev_container = match &self.config {
650 ConfigStatus::Deserialized(_) => {
651 log::error!(
652 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
653 );
654 return Err(DevContainerError::DevContainerParseFailed);
655 }
656 ConfigStatus::VariableParsed(dev_container) => dev_container,
657 };
658 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
659
660 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
661
662 mounts.append(&mut feature_mounts);
663
664 let privileged = dev_container.privileged.unwrap_or(false)
665 || self.features.iter().any(|f| f.privileged());
666
667 let mut entrypoint_script_lines = vec![
668 "echo Container started".to_string(),
669 "trap \"exit 0\" 15".to_string(),
670 ];
671
672 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
673 entrypoint_script_lines.push(entrypoint.clone());
674 }
675 entrypoint_script_lines.append(&mut vec![
676 "exec \"$@\"".to_string(),
677 "while sleep 1 & wait $!; do :; done".to_string(),
678 ]);
679
680 Ok(DockerBuildResources {
681 image: base_image,
682 additional_mounts: mounts,
683 privileged,
684 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
685 })
686 }
687
688 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
689 if let ConfigStatus::Deserialized(_) = &self.config {
690 log::error!(
691 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
692 );
693 return Err(DevContainerError::DevContainerParseFailed);
694 }
695 let dev_container = self.dev_container();
696 match dev_container.build_type() {
697 DevContainerBuildType::Image => {
698 let built_docker_image = self.build_docker_image().await?;
699 let Some(base_image) = dev_container.image.as_ref() else {
700 log::error!("Dev container is using and image which can't be referenced");
701 return Err(DevContainerError::DevContainerParseFailed);
702 };
703 let built_docker_image = self
704 .update_remote_user_uid(built_docker_image, base_image)
705 .await?;
706
707 let resources = self.build_merged_resources(built_docker_image)?;
708 Ok(DevContainerBuildResources::Docker(resources))
709 }
710 DevContainerBuildType::Dockerfile => {
711 let built_docker_image = self.build_docker_image().await?;
712 let Some(features_build_info) = &self.features_build_info else {
713 log::error!(
714 "Can't attempt to build update UID dockerfile before initial docker build"
715 );
716 return Err(DevContainerError::DevContainerParseFailed);
717 };
718 let built_docker_image = self
719 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
720 .await?;
721
722 let resources = self.build_merged_resources(built_docker_image)?;
723 Ok(DevContainerBuildResources::Docker(resources))
724 }
725 DevContainerBuildType::DockerCompose => {
726 log::debug!("Using docker compose. Building extended compose files");
727 let docker_compose_resources = self.build_and_extend_compose_files().await?;
728
729 return Ok(DevContainerBuildResources::DockerCompose(
730 docker_compose_resources,
731 ));
732 }
733 DevContainerBuildType::None => {
734 return Err(DevContainerError::DevContainerParseFailed);
735 }
736 }
737 }
738
739 async fn run_dev_container(
740 &self,
741 build_resources: DevContainerBuildResources,
742 ) -> Result<DevContainerUp, DevContainerError> {
743 let ConfigStatus::VariableParsed(_) = &self.config else {
744 log::error!(
745 "Variables have not been parsed; cannot proceed with running the dev container"
746 );
747 return Err(DevContainerError::DevContainerParseFailed);
748 };
749 let running_container = match build_resources {
750 DevContainerBuildResources::DockerCompose(resources) => {
751 self.run_docker_compose(resources).await?
752 }
753 DevContainerBuildResources::Docker(resources) => {
754 self.run_docker_image(resources).await?
755 }
756 };
757
758 let remote_user = get_remote_user_from_config(&running_container, self)?;
759 let remote_workspace_folder = get_remote_dir_from_config(
760 &running_container,
761 (&self.local_project_directory.display()).to_string(),
762 )?;
763
764 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
765
766 Ok(DevContainerUp {
767 container_id: running_container.id,
768 remote_user,
769 remote_workspace_folder,
770 extension_ids: self.extension_ids(),
771 remote_env,
772 })
773 }
774
775 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
776 let dev_container = match &self.config {
777 ConfigStatus::Deserialized(_) => {
778 log::error!(
779 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
780 );
781 return Err(DevContainerError::DevContainerParseFailed);
782 }
783 ConfigStatus::VariableParsed(dev_container) => dev_container,
784 };
785 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
786 return Err(DevContainerError::DevContainerParseFailed);
787 };
788 let docker_compose_full_paths = docker_compose_files
789 .iter()
790 .map(|relative| self.config_directory.join(relative))
791 .collect::<Vec<PathBuf>>();
792
793 let Some(config) = self
794 .docker_client
795 .get_docker_compose_config(&docker_compose_full_paths)
796 .await?
797 else {
798 log::error!("Output could not deserialize into DockerComposeConfig");
799 return Err(DevContainerError::DevContainerParseFailed);
800 };
801 Ok(DockerComposeResources {
802 files: docker_compose_full_paths,
803 config,
804 })
805 }
806
    /// Builds the primary compose service image (with feature layers applied)
    /// and appends override compose files (build-time and runtime) to the
    /// resolved compose file list.
    ///
    /// Requires variable expansion and
    /// `download_feature_and_dockerfile_resources` to have run (it consumes
    /// `features_build_info`).
    async fn build_and_extend_compose_files(
        &self,
    ) -> Result<DockerComposeResources, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot build and extend compose files: features build info is not yet constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut docker_compose_resources = self.docker_compose_manifest().await?;
        let supports_buildkit = self.docker_client.supports_compose_buildkit();

        let (main_service_name, main_service) =
            find_primary_service(&docker_compose_resources, self)?;
        // NOTE(review): `.map(|b| b.dockerfile.as_ref()).is_some()` is true
        // whenever a `build` section exists, even with no dockerfile — it is
        // equivalent to `build.is_some()`. If the intent was "has a dockerfile",
        // this should be `.and_then(...)` — confirm against the spec/handling
        // elsewhere (e.g. `dockerfile_location`).
        let (built_service_image, built_service_image_tag) = if main_service
            .build
            .as_ref()
            .map(|b| b.dockerfile.as_ref())
            .is_some()
        {
            // Service builds from a Dockerfile: rebuild through the extended
            // Dockerfile so feature layers are included.
            if !supports_buildkit {
                // Without BuildKit, feature content must be staged as an image.
                self.build_feature_content_image().await?;
            }

            let dockerfile_path = &features_build_info.dockerfile_path;

            let build_args = if !supports_buildkit {
                HashMap::from([
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            } else {
                HashMap::from([
                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            };

            // BuildKit sources feature content via a named build context.
            let additional_contexts = if !supports_buildkit {
                None
            } else {
                Some(HashMap::from([(
                    "dev_containers_feature_content_source".to_string(),
                    features_build_info
                        .features_content_dir
                        .display()
                        .to_string(),
                )]))
            };

            // Compose override that redirects the primary service's build to
            // the extended Dockerfile and tags the result.
            let build_override = DockerComposeConfig {
                name: None,
                services: HashMap::from([(
                    main_service_name.clone(),
                    DockerComposeService {
                        image: Some(features_build_info.image_tag.clone()),
                        entrypoint: None,
                        cap_add: None,
                        security_opt: None,
                        labels: None,
                        build: Some(DockerComposeServiceBuild {
                            // Keep the service's own context when it has one.
                            context: Some(
                                main_service
                                    .build
                                    .as_ref()
                                    .and_then(|b| b.context.clone())
                                    .unwrap_or_else(|| {
                                        features_build_info.empty_context_dir.display().to_string()
                                    }),
                            ),
                            dockerfile: Some(dockerfile_path.display().to_string()),
                            args: Some(build_args),
                            additional_contexts,
                        }),
                        volumes: Vec::new(),
                        ..Default::default()
                    },
                )]),
                volumes: HashMap::new(),
            };

            let temp_base = std::env::temp_dir().join("devcontainer-zed");
            let config_location = temp_base.join("docker_compose_build.json");

            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                log::error!("Error serializing docker compose runtime override: {e}");
                DevContainerError::DevContainerParseFailed
            })?;

            self.fs
                .write(&config_location, config_json.as_bytes())
                .await
                .map_err(|e| {
                    log::error!("Error writing the runtime override file: {e}");
                    DevContainerError::FilesystemError
                })?;

            docker_compose_resources.files.push(config_location);

            self.docker_client
                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                .await?;
            (
                self.docker_client
                    .inspect(&features_build_info.image_tag)
                    .await?,
                &features_build_info.image_tag,
            )
        } else if let Some(image) = &main_service.image {
            if dev_container
                .features
                .as_ref()
                .is_none_or(|features| features.is_empty())
            {
                // No features: the service image can be used as-is.
                (self.docker_client.inspect(image).await?, image)
            } else {
                // NOTE(review): this branch largely duplicates the override
                // construction above (differing only in context/base-image
                // args) — candidate for extraction into a shared helper.
                if !supports_buildkit {
                    self.build_feature_content_image().await?;
                }

                let dockerfile_path = &features_build_info.dockerfile_path;

                let build_args = if !supports_buildkit {
                    HashMap::from([
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                } else {
                    HashMap::from([
                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                };

                let additional_contexts = if !supports_buildkit {
                    None
                } else {
                    Some(HashMap::from([(
                        "dev_containers_feature_content_source".to_string(),
                        features_build_info
                            .features_content_dir
                            .display()
                            .to_string(),
                    )]))
                };

                let build_override = DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        main_service_name.clone(),
                        DockerComposeService {
                            image: Some(features_build_info.image_tag.clone()),
                            entrypoint: None,
                            cap_add: None,
                            security_opt: None,
                            labels: None,
                            build: Some(DockerComposeServiceBuild {
                                context: Some(
                                    features_build_info.empty_context_dir.display().to_string(),
                                ),
                                dockerfile: Some(dockerfile_path.display().to_string()),
                                args: Some(build_args),
                                additional_contexts,
                            }),
                            volumes: Vec::new(),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                };

                let temp_base = std::env::temp_dir().join("devcontainer-zed");
                let config_location = temp_base.join("docker_compose_build.json");

                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                    log::error!("Error serializing docker compose runtime override: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;

                self.fs
                    .write(&config_location, config_json.as_bytes())
                    .await
                    .map_err(|e| {
                        log::error!("Error writing the runtime override file: {e}");
                        DevContainerError::FilesystemError
                    })?;

                docker_compose_resources.files.push(config_location);

                self.docker_client
                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                    .await?;

                (
                    self.docker_client
                        .inspect(&features_build_info.image_tag)
                        .await?,
                    &features_build_info.image_tag,
                )
            }
        } else {
            log::error!("Docker compose must have either image or dockerfile defined");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        let built_service_image = self
            .update_remote_user_uid(built_service_image, built_service_image_tag)
            .await?;

        let resources = self.build_merged_resources(built_service_image)?;

        // `network_mode: service:<name>` must be preserved in the runtime override.
        let network_mode = main_service.network_mode.as_ref();
        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
        let runtime_override_file = self
            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
            .await?;

        docker_compose_resources.files.push(runtime_override_file);

        Ok(docker_compose_resources)
    }
1046
1047 async fn write_runtime_override_file(
1048 &self,
1049 main_service_name: &str,
1050 network_mode_service: Option<&str>,
1051 resources: DockerBuildResources,
1052 ) -> Result<PathBuf, DevContainerError> {
1053 let config =
1054 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1055 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1056 let config_location = temp_base.join("docker_compose_runtime.json");
1057
1058 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1059 log::error!("Error serializing docker compose runtime override: {e}");
1060 DevContainerError::DevContainerParseFailed
1061 })?;
1062
1063 self.fs
1064 .write(&config_location, config_json.as_bytes())
1065 .await
1066 .map_err(|e| {
1067 log::error!("Error writing the runtime override file: {e}");
1068 DevContainerError::FilesystemError
1069 })?;
1070
1071 Ok(config_location)
1072 }
1073
1074 fn build_runtime_override(
1075 &self,
1076 main_service_name: &str,
1077 network_mode_service: Option<&str>,
1078 resources: DockerBuildResources,
1079 ) -> Result<DockerComposeConfig, DevContainerError> {
1080 let mut runtime_labels = HashMap::new();
1081
1082 if let Some(metadata) = &resources.image.config.labels.metadata {
1083 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1084 log::error!("Error serializing docker image metadata: {e}");
1085 DevContainerError::ContainerNotValid(resources.image.id.clone())
1086 })?;
1087
1088 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1089 }
1090
1091 for (k, v) in self.identifying_labels() {
1092 runtime_labels.insert(k.to_string(), v.to_string());
1093 }
1094
1095 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1096 .additional_mounts
1097 .iter()
1098 .filter_map(|mount| {
1099 if let Some(mount_type) = &mount.mount_type
1100 && mount_type.to_lowercase() == "volume"
1101 && let Some(source) = &mount.source
1102 {
1103 Some((
1104 source.clone(),
1105 DockerComposeVolume {
1106 name: source.clone(),
1107 },
1108 ))
1109 } else {
1110 None
1111 }
1112 })
1113 .collect();
1114
1115 let volumes: Vec<MountDefinition> = resources
1116 .additional_mounts
1117 .iter()
1118 .map(|v| MountDefinition {
1119 source: v.source.clone(),
1120 target: v.target.clone(),
1121 mount_type: v.mount_type.clone(),
1122 })
1123 .collect();
1124
1125 let mut main_service = DockerComposeService {
1126 entrypoint: Some(vec![
1127 "/bin/sh".to_string(),
1128 "-c".to_string(),
1129 resources.entrypoint_script,
1130 "-".to_string(),
1131 ]),
1132 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1133 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1134 labels: Some(runtime_labels),
1135 volumes,
1136 privileged: Some(resources.privileged),
1137 ..Default::default()
1138 };
1139 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1140 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1141 if let Some(forward_ports) = &self.dev_container().forward_ports {
1142 let main_service_ports: Vec<String> = forward_ports
1143 .iter()
1144 .filter_map(|f| match f {
1145 ForwardPort::Number(port) => Some(port.to_string()),
1146 ForwardPort::String(port) => {
1147 let parts: Vec<&str> = port.split(":").collect();
1148 if parts.len() <= 1 {
1149 Some(port.to_string())
1150 } else if parts.len() == 2 {
1151 if parts[0] == main_service_name {
1152 Some(parts[1].to_string())
1153 } else {
1154 None
1155 }
1156 } else {
1157 None
1158 }
1159 }
1160 })
1161 .collect();
1162 for port in main_service_ports {
1163 // If the main service uses a different service's network bridge, append to that service's ports instead
1164 if let Some(network_service_name) = network_mode_service {
1165 if let Some(service) = service_declarations.get_mut(network_service_name) {
1166 service.ports.push(DockerComposeServicePort {
1167 target: port.clone(),
1168 published: port.clone(),
1169 ..Default::default()
1170 });
1171 } else {
1172 service_declarations.insert(
1173 network_service_name.to_string(),
1174 DockerComposeService {
1175 ports: vec![DockerComposeServicePort {
1176 target: port.clone(),
1177 published: port.clone(),
1178 ..Default::default()
1179 }],
1180 ..Default::default()
1181 },
1182 );
1183 }
1184 } else {
1185 main_service.ports.push(DockerComposeServicePort {
1186 target: port.clone(),
1187 published: port.clone(),
1188 ..Default::default()
1189 });
1190 }
1191 }
1192 let other_service_ports: Vec<(&str, &str)> = forward_ports
1193 .iter()
1194 .filter_map(|f| match f {
1195 ForwardPort::Number(_) => None,
1196 ForwardPort::String(port) => {
1197 let parts: Vec<&str> = port.split(":").collect();
1198 if parts.len() != 2 {
1199 None
1200 } else {
1201 if parts[0] == main_service_name {
1202 None
1203 } else {
1204 Some((parts[0], parts[1]))
1205 }
1206 }
1207 }
1208 })
1209 .collect();
1210 for (service_name, port) in other_service_ports {
1211 if let Some(service) = service_declarations.get_mut(service_name) {
1212 service.ports.push(DockerComposeServicePort {
1213 target: port.to_string(),
1214 published: port.to_string(),
1215 ..Default::default()
1216 });
1217 } else {
1218 service_declarations.insert(
1219 service_name.to_string(),
1220 DockerComposeService {
1221 ports: vec![DockerComposeServicePort {
1222 target: port.to_string(),
1223 published: port.to_string(),
1224 ..Default::default()
1225 }],
1226 ..Default::default()
1227 },
1228 );
1229 }
1230 }
1231 }
1232 if let Some(port) = &self.dev_container().app_port {
1233 if let Some(network_service_name) = network_mode_service {
1234 if let Some(service) = service_declarations.get_mut(network_service_name) {
1235 service.ports.push(DockerComposeServicePort {
1236 target: port.clone(),
1237 published: port.clone(),
1238 ..Default::default()
1239 });
1240 } else {
1241 service_declarations.insert(
1242 network_service_name.to_string(),
1243 DockerComposeService {
1244 ports: vec![DockerComposeServicePort {
1245 target: port.clone(),
1246 published: port.clone(),
1247 ..Default::default()
1248 }],
1249 ..Default::default()
1250 },
1251 );
1252 }
1253 } else {
1254 main_service.ports.push(DockerComposeServicePort {
1255 target: port.clone(),
1256 published: port.clone(),
1257 ..Default::default()
1258 });
1259 }
1260 }
1261
1262 service_declarations.insert(main_service_name.to_string(), main_service);
1263 let new_docker_compose_config = DockerComposeConfig {
1264 name: None,
1265 services: service_declarations,
1266 volumes: config_volumes,
1267 };
1268
1269 Ok(new_docker_compose_config)
1270 }
1271
1272 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1273 let dev_container = match &self.config {
1274 ConfigStatus::Deserialized(_) => {
1275 log::error!(
1276 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1277 );
1278 return Err(DevContainerError::DevContainerParseFailed);
1279 }
1280 ConfigStatus::VariableParsed(dev_container) => dev_container,
1281 };
1282
1283 match dev_container.build_type() {
1284 DevContainerBuildType::Image => {
1285 let Some(image_tag) = &dev_container.image else {
1286 return Err(DevContainerError::DevContainerParseFailed);
1287 };
1288 let base_image = self.docker_client.inspect(image_tag).await?;
1289 if dev_container
1290 .features
1291 .as_ref()
1292 .is_none_or(|features| features.is_empty())
1293 {
1294 log::debug!("No features to add. Using base image");
1295 return Ok(base_image);
1296 }
1297 }
1298 DevContainerBuildType::Dockerfile => {}
1299 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1300 return Err(DevContainerError::DevContainerParseFailed);
1301 }
1302 };
1303
1304 let mut command = self.create_docker_build()?;
1305
1306 let output = self
1307 .command_runner
1308 .run_command(&mut command)
1309 .await
1310 .map_err(|e| {
1311 log::error!("Error building docker image: {e}");
1312 DevContainerError::CommandFailed(command.get_program().display().to_string())
1313 })?;
1314
1315 if !output.status.success() {
1316 let stderr = String::from_utf8_lossy(&output.stderr);
1317 log::error!("docker buildx build failed: {stderr}");
1318 return Err(DevContainerError::CommandFailed(
1319 command.get_program().display().to_string(),
1320 ));
1321 }
1322
1323 // After a successful build, inspect the newly tagged image to get its metadata
1324 let Some(features_build_info) = &self.features_build_info else {
1325 log::error!("Features build info expected, but not created");
1326 return Err(DevContainerError::DevContainerParseFailed);
1327 };
1328 let image = self
1329 .docker_client
1330 .inspect(&features_build_info.image_tag)
1331 .await?;
1332
1333 Ok(image)
1334 }
1335
    /// No-op on Windows hosts: remapping the container user's UID/GID to the
    /// host user's only applies to Unix-like hosts (there is no host UID to
    /// read here), so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1344 #[cfg(not(target_os = "windows"))]
1345 async fn update_remote_user_uid(
1346 &self,
1347 image: DockerInspect,
1348 base_image: &str,
1349 ) -> Result<DockerInspect, DevContainerError> {
1350 let dev_container = self.dev_container();
1351
1352 let Some(features_build_info) = &self.features_build_info else {
1353 return Ok(image);
1354 };
1355
1356 // updateRemoteUserUID defaults to true per the devcontainers spec
1357 if dev_container.update_remote_user_uid == Some(false) {
1358 return Ok(image);
1359 }
1360
1361 let remote_user = get_remote_user_from_config(&image, self)?;
1362 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1363 return Ok(image);
1364 }
1365
1366 let image_user = image
1367 .config
1368 .image_user
1369 .as_deref()
1370 .unwrap_or("root")
1371 .to_string();
1372
1373 let host_uid = Command::new("id")
1374 .arg("-u")
1375 .output()
1376 .await
1377 .map_err(|e| {
1378 log::error!("Failed to get host UID: {e}");
1379 DevContainerError::CommandFailed("id -u".to_string())
1380 })
1381 .and_then(|output| {
1382 String::from_utf8_lossy(&output.stdout)
1383 .trim()
1384 .parse::<u32>()
1385 .map_err(|e| {
1386 log::error!("Failed to parse host UID: {e}");
1387 DevContainerError::CommandFailed("id -u".to_string())
1388 })
1389 })?;
1390
1391 let host_gid = Command::new("id")
1392 .arg("-g")
1393 .output()
1394 .await
1395 .map_err(|e| {
1396 log::error!("Failed to get host GID: {e}");
1397 DevContainerError::CommandFailed("id -g".to_string())
1398 })
1399 .and_then(|output| {
1400 String::from_utf8_lossy(&output.stdout)
1401 .trim()
1402 .parse::<u32>()
1403 .map_err(|e| {
1404 log::error!("Failed to parse host GID: {e}");
1405 DevContainerError::CommandFailed("id -g".to_string())
1406 })
1407 })?;
1408
1409 let dockerfile_content = self.generate_update_uid_dockerfile();
1410
1411 let dockerfile_path = features_build_info
1412 .features_content_dir
1413 .join("updateUID.Dockerfile");
1414 self.fs
1415 .write(&dockerfile_path, dockerfile_content.as_bytes())
1416 .await
1417 .map_err(|e| {
1418 log::error!("Failed to write updateUID Dockerfile: {e}");
1419 DevContainerError::FilesystemError
1420 })?;
1421
1422 let updated_image_tag = format!("{}-uid", features_build_info.image_tag);
1423
1424 let mut command = Command::new(self.docker_client.docker_cli());
1425 command.args(["build"]);
1426 command.args(["-f", &dockerfile_path.display().to_string()]);
1427 command.args(["-t", &updated_image_tag]);
1428 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1429 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1430 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1431 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1432 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1433 command.arg(features_build_info.empty_context_dir.display().to_string());
1434
1435 let output = self
1436 .command_runner
1437 .run_command(&mut command)
1438 .await
1439 .map_err(|e| {
1440 log::error!("Error building UID update image: {e}");
1441 DevContainerError::CommandFailed(command.get_program().display().to_string())
1442 })?;
1443
1444 if !output.status.success() {
1445 let stderr = String::from_utf8_lossy(&output.stderr);
1446 log::error!("UID update build failed: {stderr}");
1447 return Err(DevContainerError::CommandFailed(
1448 command.get_program().display().to_string(),
1449 ));
1450 }
1451
1452 self.docker_client.inspect(&updated_image_tag).await
1453 }
1454
1455 #[cfg(not(target_os = "windows"))]
1456 fn generate_update_uid_dockerfile(&self) -> String {
1457 let mut dockerfile = r#"ARG BASE_IMAGE
1458FROM $BASE_IMAGE
1459
1460USER root
1461
1462ARG REMOTE_USER
1463ARG NEW_UID
1464ARG NEW_GID
1465SHELL ["/bin/sh", "-c"]
1466RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1467 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1468 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1469 if [ -z "$OLD_UID" ]; then \
1470 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1471 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1472 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1473 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1474 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1475 else \
1476 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1477 FREE_GID=65532; \
1478 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1479 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1480 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1481 fi; \
1482 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1483 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1484 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1485 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1486 fi; \
1487 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1488 fi;
1489
1490ARG IMAGE_USER
1491USER $IMAGE_USER
1492
1493# Ensure that /etc/profile does not clobber the existing path
1494RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1495"#.to_string();
1496 for feature in &self.features {
1497 let container_env_layer = feature.generate_dockerfile_env();
1498 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1499 }
1500
1501 if let Some(env) = &self.dev_container().container_env {
1502 for (key, value) in env {
1503 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1504 }
1505 }
1506 dockerfile
1507 }
1508
1509 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1510 let Some(features_build_info) = &self.features_build_info else {
1511 log::error!("Features build info not available for building feature content image");
1512 return Err(DevContainerError::DevContainerParseFailed);
1513 };
1514 let features_content_dir = &features_build_info.features_content_dir;
1515
1516 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1517 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1518
1519 self.fs
1520 .write(&dockerfile_path, dockerfile_content.as_bytes())
1521 .await
1522 .map_err(|e| {
1523 log::error!("Failed to write feature content Dockerfile: {e}");
1524 DevContainerError::FilesystemError
1525 })?;
1526
1527 let mut command = Command::new(self.docker_client.docker_cli());
1528 command.args([
1529 "build",
1530 "-t",
1531 "dev_container_feature_content_temp",
1532 "-f",
1533 &dockerfile_path.display().to_string(),
1534 &features_content_dir.display().to_string(),
1535 ]);
1536
1537 let output = self
1538 .command_runner
1539 .run_command(&mut command)
1540 .await
1541 .map_err(|e| {
1542 log::error!("Error building feature content image: {e}");
1543 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1544 })?;
1545
1546 if !output.status.success() {
1547 let stderr = String::from_utf8_lossy(&output.stderr);
1548 log::error!("Feature content image build failed: {stderr}");
1549 return Err(DevContainerError::CommandFailed(
1550 self.docker_client.docker_cli(),
1551 ));
1552 }
1553
1554 Ok(())
1555 }
1556
1557 fn create_docker_build(&self) -> Result<Command, DevContainerError> {
1558 let dev_container = match &self.config {
1559 ConfigStatus::Deserialized(_) => {
1560 log::error!(
1561 "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
1562 );
1563 return Err(DevContainerError::DevContainerParseFailed);
1564 }
1565 ConfigStatus::VariableParsed(dev_container) => dev_container,
1566 };
1567
1568 let Some(features_build_info) = &self.features_build_info else {
1569 log::error!(
1570 "Cannot create docker build command; features build info has not been constructed"
1571 );
1572 return Err(DevContainerError::DevContainerParseFailed);
1573 };
1574 let mut command = Command::new(self.docker_client.docker_cli());
1575
1576 command.args(["buildx", "build"]);
1577
1578 // --load is short for --output=docker, loading the built image into the local docker images
1579 command.arg("--load");
1580
1581 // BuildKit build context: provides the features content directory as a named context
1582 // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
1583 command.args([
1584 "--build-context",
1585 &format!(
1586 "dev_containers_feature_content_source={}",
1587 features_build_info.features_content_dir.display()
1588 ),
1589 ]);
1590
1591 // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
1592 if let Some(build_image) = &features_build_info.build_image {
1593 command.args([
1594 "--build-arg",
1595 &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
1596 ]);
1597 } else {
1598 command.args([
1599 "--build-arg",
1600 "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
1601 ]);
1602 }
1603
1604 command.args([
1605 "--build-arg",
1606 &format!(
1607 "_DEV_CONTAINERS_IMAGE_USER={}",
1608 self.root_image
1609 .as_ref()
1610 .and_then(|docker_image| docker_image.config.image_user.as_ref())
1611 .unwrap_or(&"root".to_string())
1612 ),
1613 ]);
1614
1615 command.args([
1616 "--build-arg",
1617 "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
1618 ]);
1619
1620 if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
1621 for (key, value) in args {
1622 command.args(["--build-arg", &format!("{}={}", key, value)]);
1623 }
1624 }
1625
1626 command.args(["--target", "dev_containers_target_stage"]);
1627
1628 command.args([
1629 "-f",
1630 &features_build_info.dockerfile_path.display().to_string(),
1631 ]);
1632
1633 command.args(["-t", &features_build_info.image_tag]);
1634
1635 if dev_container.build_type() == DevContainerBuildType::Dockerfile {
1636 command.arg(self.config_directory.display().to_string());
1637 } else {
1638 // Use an empty folder as the build context to avoid pulling in unneeded files.
1639 // The actual feature content is supplied via the BuildKit build context above.
1640 command.arg(features_build_info.empty_context_dir.display().to_string());
1641 }
1642
1643 Ok(command)
1644 }
1645
1646 async fn run_docker_compose(
1647 &self,
1648 resources: DockerComposeResources,
1649 ) -> Result<DockerInspect, DevContainerError> {
1650 let mut command = Command::new(self.docker_client.docker_cli());
1651 command.args(&["compose", "--project-name", &self.project_name()]);
1652 for docker_compose_file in resources.files {
1653 command.args(&["-f", &docker_compose_file.display().to_string()]);
1654 }
1655 command.args(&["up", "-d"]);
1656
1657 let output = self
1658 .command_runner
1659 .run_command(&mut command)
1660 .await
1661 .map_err(|e| {
1662 log::error!("Error running docker compose up: {e}");
1663 DevContainerError::CommandFailed(command.get_program().display().to_string())
1664 })?;
1665
1666 if !output.status.success() {
1667 let stderr = String::from_utf8_lossy(&output.stderr);
1668 log::error!("Non-success status from docker compose up: {}", stderr);
1669 return Err(DevContainerError::CommandFailed(
1670 command.get_program().display().to_string(),
1671 ));
1672 }
1673
1674 if let Some(docker_ps) = self.check_for_existing_container().await? {
1675 log::debug!("Found newly created dev container");
1676 return self.docker_client.inspect(&docker_ps.id).await;
1677 }
1678
1679 log::error!("Could not find existing container after docker compose up");
1680
1681 Err(DevContainerError::DevContainerParseFailed)
1682 }
1683
1684 async fn run_docker_image(
1685 &self,
1686 build_resources: DockerBuildResources,
1687 ) -> Result<DockerInspect, DevContainerError> {
1688 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1689
1690 let output = self
1691 .command_runner
1692 .run_command(&mut docker_run_command)
1693 .await
1694 .map_err(|e| {
1695 log::error!("Error running docker run: {e}");
1696 DevContainerError::CommandFailed(
1697 docker_run_command.get_program().display().to_string(),
1698 )
1699 })?;
1700
1701 if !output.status.success() {
1702 let std_err = String::from_utf8_lossy(&output.stderr);
1703 log::error!("Non-success status from docker run. StdErr: {std_err}");
1704 return Err(DevContainerError::CommandFailed(
1705 docker_run_command.get_program().display().to_string(),
1706 ));
1707 }
1708
1709 log::debug!("Checking for container that was started");
1710 let Some(docker_ps) = self.check_for_existing_container().await? else {
1711 log::error!("Could not locate container just created");
1712 return Err(DevContainerError::DevContainerParseFailed);
1713 };
1714 self.docker_client.inspect(&docker_ps.id).await
1715 }
1716
    /// The host-side project directory rendered as a (lossy) display string.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
    /// The final path component of the host project directory (e.g. "myproject").
    ///
    /// Errors with `DevContainerParseFailed` when the path has no file name
    /// (e.g. a filesystem root).
    fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
        self.local_project_directory
            .file_name()
            .map(|f| f.display().to_string())
            .ok_or(DevContainerError::DevContainerParseFailed)
    }
1726
1727 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1728 self.dev_container()
1729 .workspace_folder
1730 .as_ref()
1731 .map(|folder| PathBuf::from(folder))
1732 .or(Some(
1733 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1734 ))
1735 .ok_or(DevContainerError::DevContainerParseFailed)
1736 }
1737 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1738 self.remote_workspace_folder().and_then(|f| {
1739 f.file_name()
1740 .map(|file_name| file_name.display().to_string())
1741 .ok_or(DevContainerError::DevContainerParseFailed)
1742 })
1743 }
1744
1745 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1746 if let Some(mount) = &self.dev_container().workspace_mount {
1747 return Ok(mount.clone());
1748 }
1749 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1750 return Err(DevContainerError::DevContainerParseFailed);
1751 };
1752
1753 Ok(MountDefinition {
1754 source: Some(self.local_workspace_folder()),
1755 target: format!("/workspaces/{}", project_directory_name.display()),
1756 mount_type: None,
1757 })
1758 }
1759
1760 fn create_docker_run_command(
1761 &self,
1762 build_resources: DockerBuildResources,
1763 ) -> Result<Command, DevContainerError> {
1764 let remote_workspace_mount = self.remote_workspace_mount()?;
1765
1766 let docker_cli = self.docker_client.docker_cli();
1767 let mut command = Command::new(&docker_cli);
1768
1769 command.arg("run");
1770
1771 if build_resources.privileged {
1772 command.arg("--privileged");
1773 }
1774
1775 if &docker_cli == "podman" {
1776 command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
1777 }
1778
1779 command.arg("--sig-proxy=false");
1780 command.arg("-d");
1781 command.arg("--mount");
1782 command.arg(remote_workspace_mount.to_string());
1783
1784 for mount in &build_resources.additional_mounts {
1785 command.arg("--mount");
1786 command.arg(mount.to_string());
1787 }
1788
1789 for (key, val) in self.identifying_labels() {
1790 command.arg("-l");
1791 command.arg(format!("{}={}", key, val));
1792 }
1793
1794 if let Some(metadata) = &build_resources.image.config.labels.metadata {
1795 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1796 log::error!("Problem serializing image metadata: {e}");
1797 DevContainerError::ContainerNotValid(build_resources.image.id.clone())
1798 })?;
1799 command.arg("-l");
1800 command.arg(format!(
1801 "{}={}",
1802 "devcontainer.metadata", serialized_metadata
1803 ));
1804 }
1805
1806 if let Some(forward_ports) = &self.dev_container().forward_ports {
1807 for port in forward_ports {
1808 if let ForwardPort::Number(port_number) = port {
1809 command.arg("-p");
1810 command.arg(format!("{port_number}:{port_number}"));
1811 }
1812 }
1813 }
1814 if let Some(app_port) = &self.dev_container().app_port {
1815 command.arg("-p");
1816 command.arg(format!("{app_port}:{app_port}"));
1817 }
1818
1819 command.arg("--entrypoint");
1820 command.arg("/bin/sh");
1821 command.arg(&build_resources.image.id);
1822 command.arg("-c");
1823
1824 command.arg(build_resources.entrypoint_script);
1825 command.arg("-");
1826
1827 Ok(command)
1828 }
1829
1830 fn extension_ids(&self) -> Vec<String> {
1831 self.dev_container()
1832 .customizations
1833 .as_ref()
1834 .map(|c| c.zed.extensions.clone())
1835 .unwrap_or_default()
1836 }
1837
    /// Full creation pipeline for a new dev container: run the host-side
    /// initialize commands, download feature/Dockerfile resources, build the
    /// image (or compose project), start the container, then run the
    /// in-container lifecycle scripts with `new_container = true` so the
    /// create-time hooks fire as well.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1851
    /// Runs the in-container lifecycle scripts via `docker exec`, with the
    /// remote workspace folder as the working directory.
    ///
    /// When `new_container` is true, the `onCreateCommand`,
    /// `updateContentCommand`, `postCreateCommand`, and `postStartCommand`
    /// hooks run (in that order); `postAttachCommand` runs in every case.
    /// The create-time hooks (`onCreate`, `updateContent`) execute as root,
    /// while the post-* hooks execute as the resolved remote user.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // onCreateCommand: first hook after creation; runs as root.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // updateContentCommand: also runs as root.
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // postCreateCommand: runs as the resolved remote user.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            // postStartCommand: runs as the resolved remote user.
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttachCommand: runs for both new and reused containers.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1939
1940 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1941 let ConfigStatus::VariableParsed(config) = &self.config else {
1942 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1943 return Err(DevContainerError::DevContainerParseFailed);
1944 };
1945
1946 if let Some(initialize_command) = &config.initialize_command {
1947 log::debug!("Running initialize command");
1948 initialize_command
1949 .run(&self.command_runner, &self.local_project_directory)
1950 .await
1951 } else {
1952 log::warn!("No initialize command found");
1953 Ok(())
1954 }
1955 }
1956
1957 async fn check_for_existing_devcontainer(
1958 &self,
1959 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1960 if let Some(docker_ps) = self.check_for_existing_container().await? {
1961 log::debug!("Dev container already found. Proceeding with it");
1962
1963 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1964
1965 if !docker_inspect.is_running() {
1966 log::debug!("Container not running. Will attempt to start, and then proceed");
1967 self.docker_client.start_container(&docker_ps.id).await?;
1968 }
1969
1970 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1971
1972 let remote_folder = get_remote_dir_from_config(
1973 &docker_inspect,
1974 (&self.local_project_directory.display()).to_string(),
1975 )?;
1976
1977 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1978
1979 let dev_container_up = DevContainerUp {
1980 container_id: docker_ps.id,
1981 remote_user: remote_user,
1982 remote_workspace_folder: remote_folder,
1983 extension_ids: self.extension_ids(),
1984 remote_env,
1985 };
1986
1987 self.run_remote_scripts(&dev_container_up, false).await?;
1988
1989 Ok(Some(dev_container_up))
1990 } else {
1991 log::debug!("Existing container not found.");
1992
1993 Ok(None)
1994 }
1995 }
1996
1997 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1998 self.docker_client
1999 .find_process_by_filters(
2000 self.identifying_labels()
2001 .iter()
2002 .map(|(k, v)| format!("label={k}={v}"))
2003 .collect(),
2004 )
2005 .await
2006 }
2007
2008 fn project_name(&self) -> String {
2009 if let Some(name) = &self.dev_container().name {
2010 safe_id_lower(name)
2011 } else {
2012 let alternate_name = &self
2013 .local_workspace_base_name()
2014 .unwrap_or(self.local_workspace_folder());
2015 safe_id_lower(alternate_name)
2016 }
2017 }
2018}
2019
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm")
    /// NOTE(review): presumably `None` when the base comes from a Dockerfile
    /// build rather than a pulled image — confirm against the producer.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2038
2039pub(crate) async fn read_devcontainer_configuration(
2040 config: DevContainerConfig,
2041 context: &DevContainerContext,
2042 environment: HashMap<String, String>,
2043) -> Result<DevContainer, DevContainerError> {
2044 let docker = if context.use_podman {
2045 Docker::new("podman")
2046 } else {
2047 Docker::new("docker")
2048 };
2049 let mut dev_container = DevContainerManifest::new(
2050 context,
2051 environment,
2052 Arc::new(docker),
2053 Arc::new(DefaultCommandRunner::new()),
2054 config,
2055 &context.project_directory.as_ref(),
2056 )
2057 .await?;
2058 dev_container.parse_nonremote_vars()?;
2059 Ok(dev_container.dev_container().clone())
2060}
2061
2062pub(crate) async fn spawn_dev_container(
2063 context: &DevContainerContext,
2064 environment: HashMap<String, String>,
2065 config: DevContainerConfig,
2066 local_project_path: &Path,
2067) -> Result<DevContainerUp, DevContainerError> {
2068 let docker = if context.use_podman {
2069 Docker::new("podman")
2070 } else {
2071 Docker::new("docker")
2072 };
2073 let mut devcontainer_manifest = DevContainerManifest::new(
2074 context,
2075 environment,
2076 Arc::new(docker),
2077 Arc::new(DefaultCommandRunner::new()),
2078 config,
2079 local_project_path,
2080 )
2081 .await?;
2082
2083 devcontainer_manifest.parse_nonremote_vars()?;
2084
2085 log::debug!("Checking for existing container");
2086 if let Some(devcontainer) = devcontainer_manifest
2087 .check_for_existing_devcontainer()
2088 .await?
2089 {
2090 Ok(devcontainer)
2091 } else {
2092 log::debug!("Existing container not found. Building");
2093
2094 devcontainer_manifest.build_and_run().await
2095 }
2096}
2097
/// Everything needed to assemble the `docker run` invocation for a
/// single-container (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    /// Inspect data for the image the container will be started from.
    image: DockerInspect,
    /// Mounts beyond the workspace mount — presumably from the config's
    /// `mounts` list; confirm against the producer.
    additional_mounts: Vec<MountDefinition>,
    /// Whether the container should run privileged — TODO confirm this maps
    /// to `--privileged` in the run-command builder.
    privileged: bool,
    /// Shell script used as the container's long-running entrypoint
    /// (passed via `sh -c`; see `create_docker_run_command` tests).
    entrypoint_script: String,
}
2105
/// The two build strategies a dev container can resolve to: a Docker Compose
/// project or a single `docker run` container.
#[derive(Debug)]
enum DevContainerBuildResources {
    /// Compose-based setup: compose files plus the parsed compose config.
    DockerCompose(DockerComposeResources),
    /// Single-container setup driven by `docker run`.
    Docker(DockerBuildResources),
}
2111
2112fn find_primary_service(
2113 docker_compose: &DockerComposeResources,
2114 devcontainer: &DevContainerManifest,
2115) -> Result<(String, DockerComposeService), DevContainerError> {
2116 let Some(service_name) = &devcontainer.dev_container().service else {
2117 return Err(DevContainerError::DevContainerParseFailed);
2118 };
2119
2120 match docker_compose.config.services.get(service_name) {
2121 Some(service) => Ok((service_name.clone(), service.clone())),
2122 None => Err(DevContainerError::DevContainerParseFailed),
2123 }
2124}
2125
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): presumably referenced from the generated Dockerfile.extended
/// when copying feature content — confirm against the build-info generation.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2129
/// Escapes regex special characters in a string.
///
/// Every character in the ERE special set is prefixed with a backslash; all
/// other characters pass through untouched.
fn escape_regex_chars(input: &str) -> String {
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    // Worst case every character needs an escape, so reserve double.
    let mut escaped = String::with_capacity(input.len() * 2);
    input.chars().for_each(|ch| {
        if SPECIAL.contains(ch) {
            escaped.push('\\');
        }
        escaped.push(ch);
    });
    escaped
}
2141
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // First drop any version/digest suffix: `@…` always wins; a `:` only
    // counts as a version separator when it appears after the last `/`
    // (so registry ports like `localhost:5000/...` are left alone).
    let trimmed = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short id is whatever follows the last path separator.
    trimmed.rsplit('/').next().unwrap_or(trimmed)
}
2165
2166/// Generates a shell command that looks up a user's passwd entry.
2167///
2168/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2169/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2170fn get_ent_passwd_shell_command(user: &str) -> String {
2171 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2172 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2173 format!(
2174 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2175 shell = escaped_for_shell,
2176 re = escaped_for_regex,
2177 )
2178}
2179
/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
///
/// Features listed in the override come first (in the specified order), followed
/// by any remaining features sorted lexicographically by their full reference ID.
/// Override entries that match no feature are silently skipped.
///
/// Generic over the value type: only the keys are ever inspected, so callers
/// with `HashMap<String, FeatureOptions>` work unchanged while the function
/// stays independently testable.
fn resolve_feature_order<'a, V>(
    features: &'a HashMap<String, V>,
    override_order: &Option<Vec<String>>,
) -> Vec<(&'a String, &'a V)> {
    let Some(order) = override_order else {
        // No override: plain lexicographic order keeps the result
        // deterministic despite HashMap's random iteration order.
        let mut entries: Vec<_> = features.iter().collect();
        entries.sort_by_key(|(id, _)| id.as_str());
        return entries;
    };

    // Overridden features first, in the order given.
    let mut ordered: Vec<(&'a String, &'a V)> = order
        .iter()
        .filter_map(|ordered_id| features.get_key_value(ordered_id))
        .collect();

    // Then everything not mentioned by the override, sorted for determinism.
    let mut remaining: Vec<_> = features
        .iter()
        .filter(|(id, _)| !order.iter().any(|o| o == *id))
        .collect();
    remaining.sort_by_key(|(id, _)| id.as_str());
    ordered.extend(remaining);
    ordered
}
2208
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `feature_ref` is the full reference (e.g. `ghcr.io/devcontainers/features/go:1`),
/// `feature_id` the short id shown in the banner, and `env_variables` the rendered
/// option lines echoed under "Options". All three are shell-quoted via `shlex`
/// before interpolation; a quoting failure maps to `DevContainerParseFailed`.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Note the naming flip: `escaped_id` holds the full reference, while
    // `escaped_name` holds the short feature id (matching the CLI's banner).
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line so the banner lines up under "Options".
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // `set -a` exports everything sourced from the env files, so install.sh
    // sees the feature options as environment variables.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2267
// Dockerfile actions need to be moved to their own file
/// Returns the build-stage alias of the first `FROM` line, if it has one.
///
/// e.g. `FROM ubuntu:22.04 AS base` → `Some("base")`.
/// Only the first `FROM` line is inspected; the `FROM` keyword match is
/// case-sensitive, consistent with the other Dockerfile helpers here.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    let from_line = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))?;
    // `split_whitespace` (rather than `split(" ")`) tolerates repeated or
    // trailing spaces, which previously made an existing alias undetectable.
    let words: Vec<&str> = from_line.split_whitespace().collect();
    if words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as") {
        Some(words[words.len() - 1].to_string())
    } else {
        None
    }
}
2282
/// Ensures every `FROM` line in `dockerfile_content` carries a build-stage
/// alias. If the first `FROM` already declares one, the content is returned
/// untouched; otherwise ` AS {alias}` is appended to each `FROM` line.
///
/// Note: the result is re-joined with `\n`, so a trailing newline in the
/// input is dropped either way except in the already-aliased case.
fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
    // Inline alias detection (mirrors `dockerfile_alias`): check whether the
    // first FROM line's second-to-last word is `AS`.
    let already_aliased = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))
        .is_some_and(|line| {
            let words: Vec<&str> = line.split(" ").collect();
            words.len() > 2 && words[words.len() - 2].to_lowercase() == "as"
        });

    if already_aliased {
        return dockerfile_content.to_string();
    }

    let mut rewritten = Vec::new();
    for line in dockerfile_content.lines() {
        if line.starts_with("FROM") {
            rewritten.push(format!("{line} AS {alias}"));
        } else {
            rewritten.push(line.to_string());
        }
    }
    rewritten.join("\n")
}
2300
/// Extracts the base image reference from the first `FROM` line of a
/// Dockerfile, textually substituting `${ARG}`-style build arguments from the
/// devcontainer's `build.args`.
///
/// NOTE(review): a flag on the FROM line (e.g. `FROM --platform=… image`)
/// would be returned instead of the image, and `$VAR` (no braces) or
/// `${VAR:-default}` forms are not substituted — confirm whether callers can
/// hit those cases.
fn image_from_dockerfile(
    devcontainer: &DevContainerManifest,
    dockerfile_contents: String,
) -> Result<String, DevContainerError> {
    // Second whitespace-separated token of the first FROM line.
    let mut raw_contents = dockerfile_contents
        .lines()
        .find(|line| line.starts_with("FROM"))
        .and_then(|from_line| {
            from_line
                .split(' ')
                .collect::<Vec<&str>>()
                .get(1)
                .map(|s| s.to_string())
        })
        .ok_or_else(|| {
            log::error!("Could not find an image definition in dockerfile");
            DevContainerError::DevContainerParseFailed
        })?;

    // Replace each `${KEY}` occurrence with its configured build-arg value;
    // args absent from the config are left as-is.
    for (k, v) in devcontainer
        .dev_container()
        .build
        .as_ref()
        .and_then(|b| b.args.as_ref())
        .unwrap_or(&HashMap::new())
    {
        raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
    }
    Ok(raw_contents)
}
2331
2332// Container user things
2333// This should come from spec - see the docs
2334fn get_remote_user_from_config(
2335 docker_config: &DockerInspect,
2336 devcontainer: &DevContainerManifest,
2337) -> Result<String, DevContainerError> {
2338 if let DevContainer {
2339 remote_user: Some(user),
2340 ..
2341 } = &devcontainer.dev_container()
2342 {
2343 return Ok(user.clone());
2344 }
2345 if let Some(metadata) = &docker_config.config.labels.metadata {
2346 for metadatum in metadata {
2347 if let Some(remote_user) = metadatum.get("remoteUser") {
2348 if let Some(remote_user_str) = remote_user.as_str() {
2349 return Ok(remote_user_str.to_string());
2350 }
2351 }
2352 }
2353 }
2354 if let Some(image_user) = &docker_config.config.image_user {
2355 if !image_user.is_empty() {
2356 return Ok(image_user.to_string());
2357 }
2358 }
2359 Ok("root".to_string())
2360}
2361
2362// This should come from spec - see the docs
2363fn get_container_user_from_config(
2364 docker_config: &DockerInspect,
2365 devcontainer: &DevContainerManifest,
2366) -> Result<String, DevContainerError> {
2367 if let Some(user) = &devcontainer.dev_container().container_user {
2368 return Ok(user.to_string());
2369 }
2370 if let Some(metadata) = &docker_config.config.labels.metadata {
2371 for metadatum in metadata {
2372 if let Some(container_user) = metadatum.get("containerUser") {
2373 if let Some(container_user_str) = container_user.as_str() {
2374 return Ok(container_user_str.to_string());
2375 }
2376 }
2377 }
2378 }
2379 if let Some(image_user) = &docker_config.config.image_user {
2380 return Ok(image_user.to_string());
2381 }
2382
2383 Ok("root".to_string())
2384}
2385
2386#[cfg(test)]
2387mod test {
2388 use std::{
2389 collections::HashMap,
2390 ffi::OsStr,
2391 path::PathBuf,
2392 process::{ExitStatus, Output},
2393 sync::{Arc, Mutex},
2394 };
2395
2396 use async_trait::async_trait;
2397 use fs::{FakeFs, Fs};
2398 use gpui::{AppContext, TestAppContext};
2399 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2400 use project::{
2401 ProjectEnvironment,
2402 worktree_store::{WorktreeIdCounter, WorktreeStore},
2403 };
2404 use serde_json_lenient::Value;
2405 use util::{command::Command, paths::SanitizedPath};
2406
2407 #[cfg(not(target_os = "windows"))]
2408 use crate::docker::DockerComposeServicePort;
2409 use crate::{
2410 DevContainerConfig, DevContainerContext,
2411 command_json::CommandRunner,
2412 devcontainer_api::DevContainerError,
2413 devcontainer_json::MountDefinition,
2414 devcontainer_manifest::{
2415 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2416 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2417 },
2418 docker::{
2419 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2420 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2421 DockerPs,
2422 },
2423 oci::TokenResponse,
2424 };
    // Canonical fake project root used by every test in this module.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2426
    /// Builds an in-memory, uncompressed tar archive from `(path, contents)`
    /// pairs and returns its raw bytes.
    ///
    /// An empty `content` string is treated as a directory entry; anything
    /// else becomes a regular file. All entries get mode 0755.
    async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
        let buffer = futures::io::Cursor::new(Vec::new());
        let mut builder = async_tar::Builder::new(buffer);
        for (file_name, content) in content {
            if content.is_empty() {
                // Directory entry: zero-size GNU header flagged as Directory.
                let mut header = async_tar::Header::new_gnu();
                header.set_size(0);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Directory);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, &[] as &[u8])
                    .await
                    .unwrap();
            } else {
                let data = content.as_bytes();
                let mut header = async_tar::Header::new_gnu();
                header.set_size(data.len() as u64);
                header.set_mode(0o755);
                header.set_entry_type(async_tar::EntryType::Regular);
                header.set_cksum();
                builder
                    .append_data(&mut header, file_name, data)
                    .await
                    .unwrap();
            }
        }
        // Finish the archive, then unwrap the cursor down to the raw bytes.
        let buffer = builder.into_inner().await.unwrap();
        buffer.into_inner()
    }
2457
2458 fn test_project_filename() -> String {
2459 PathBuf::from(TEST_PROJECT_PATH)
2460 .file_name()
2461 .expect("is valid")
2462 .display()
2463 .to_string()
2464 }
2465
2466 async fn init_devcontainer_config(
2467 fs: &Arc<FakeFs>,
2468 devcontainer_contents: &str,
2469 ) -> DevContainerConfig {
2470 fs.insert_tree(
2471 format!("{TEST_PROJECT_PATH}/.devcontainer"),
2472 serde_json::json!({"devcontainer.json": devcontainer_contents}),
2473 )
2474 .await;
2475
2476 DevContainerConfig::default_config()
2477 }
2478
    /// Handles to the fake collaborators backing a test manifest, kept so
    /// tests can inspect or drive them after construction.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Underscore-prefixed: held to keep the client alive, not inspected
        // by current tests.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2485
2486 async fn init_default_devcontainer_manifest(
2487 cx: &mut TestAppContext,
2488 devcontainer_contents: &str,
2489 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2490 let fs = FakeFs::new(cx.executor());
2491 let http_client = fake_http_client();
2492 let command_runner = Arc::new(TestCommandRunner::new());
2493 let docker = Arc::new(FakeDocker::new());
2494 let environment = HashMap::new();
2495
2496 init_devcontainer_manifest(
2497 cx,
2498 fs,
2499 http_client,
2500 docker,
2501 command_runner,
2502 environment,
2503 devcontainer_contents,
2504 )
2505 .await
2506 }
2507
    /// Fully wires a `DevContainerManifest` against the supplied fakes.
    ///
    /// Writes the devcontainer.json into `fs`, builds a gpui-backed
    /// `DevContainerContext` rooted at `TEST_PROJECT_PATH`, and returns both
    /// the manifest and a `TestDependencies` bundle holding the fakes for
    /// later inspection.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // Real worktree-store / project-environment entities are needed
        // because the context's `environment` field is a gpui weak handle.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep clones of the fakes so the caller can assert against them.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2550
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        // Config sets remoteUser=root while the image metadata below says
        // vsCode; the explicit config value must win.
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
    "#,
        )
        .await
        .unwrap();

        // Image metadata advertising a different remote user.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2590
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        // Empty config ("{}") has no remoteUser, so resolution must fall
        // through to the image's devcontainer metadata label.
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2619
2620 #[test]
2621 fn should_extract_feature_id_from_references() {
2622 assert_eq!(
2623 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2624 "aws-cli"
2625 );
2626 assert_eq!(
2627 extract_feature_id("ghcr.io/devcontainers/features/go"),
2628 "go"
2629 );
2630 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2631 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2632 assert_eq!(
2633 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2634 "rust"
2635 );
2636 }
2637
    #[gpui::test]
    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
        // NOTE(review): this metadata map is built but never attached to the
        // inspect data below (labels use `metadata: None`) — it appears to be
        // leftover setup.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );

        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let build_resources = DockerBuildResources {
            image: DockerInspect {
                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
                config: DockerInspectConfig {
                    labels: DockerConfigLabels { metadata: None },
                    image_user: None,
                    env: Vec::new(),
                },
                mounts: None,
                state: None,
            },
            additional_mounts: vec![],
            privileged: false,
            entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
        };
        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);

        assert!(docker_run_command.is_ok());
        let docker_run_command = docker_run_command.expect("ok");

        assert_eq!(docker_run_command.get_program(), "docker");
        // The config-file label must point at the devcontainer.json inside
        // the test project.
        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("devcontainer.json");
        let expected_config_file_label = expected_config_file_label.display();
        assert_eq!(
            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
            vec![
                OsStr::new("run"),
                OsStr::new("--sig-proxy=false"),
                OsStr::new("-d"),
                OsStr::new("--mount"),
                OsStr::new(
                    "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
                ),
                OsStr::new("-l"),
                OsStr::new("devcontainer.local_folder=/path/to/local/project"),
                OsStr::new("-l"),
                OsStr::new(&format!(
                    "devcontainer.config_file={expected_config_file_label}"
                )),
                OsStr::new("--entrypoint"),
                OsStr::new("/bin/sh"),
                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
                OsStr::new("-c"),
                // Same script as `entrypoint_script` above, spelled as a
                // trimmed multi-line literal.
                OsStr::new(
                    "
echo Container started
 trap \"exit 0\" 15
 exec \"$@\"
 while sleep 1 & wait $!; do :; done
"
                    .trim()
                ),
                OsStr::new("-"),
            ]
        )
    }
2706
    /// Covers the three `find_primary_service` outcomes: missing `service`
    /// field, `service` naming an unknown compose service, and a match.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer and in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2766
    /// Exercises every non-remote substitution variable against a config that
    /// relies on the default workspace mount (`/workspaces/<basename>`).
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
    "#;
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            // Host environment backing the ${localEnv:*} lookups above.
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder} — default mount puts the project under
        // /workspaces/<basename>.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2879
    /// Same substitution variables as the default-mount test, but with an
    /// explicit `workspaceMount`/`workspaceFolder`, so the container-side
    /// folder variables must reflect the custom folder instead of the
    /// /workspaces default.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
    // These are some external comments. serde_lenient should handle them
    {
        // These are some internal comments
        "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
        "name": "myDevContainer-${devcontainerId}",
        "remoteUser": "root",
        "remoteEnv": {
            "DEVCONTAINER_ID": "${devcontainerId}",
            "MYVAR2": "myvarothervalue",
            "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
            "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
            "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
            "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

        },
        "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
        "workspaceFolder": "/workspace/customfolder"
    }
    "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — from the explicit
        // workspaceFolder, not the workspaceMount target.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2966
2967 // updateRemoteUserUID is treated as false in Windows, so this test will fail
2968 // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2969 #[cfg(not(target_os = "windows"))]
2970 #[gpui::test]
2971 async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2972 cx.executor().allow_parking();
2973 env_logger::try_init().ok();
2974 let given_devcontainer_contents = r#"
2975 /*---------------------------------------------------------------------------------------------
2976 * Copyright (c) Microsoft Corporation. All rights reserved.
2977 * Licensed under the MIT License. See License.txt in the project root for license information.
2978 *--------------------------------------------------------------------------------------------*/
2979 {
2980 "name": "cli-${devcontainerId}",
2981 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2982 "build": {
2983 "dockerfile": "Dockerfile",
2984 "args": {
2985 "VARIANT": "18-bookworm",
2986 "FOO": "bar",
2987 },
2988 },
2989 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2990 "workspaceFolder": "/workspace2",
2991 "mounts": [
2992 // Keep command history across instances
2993 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2994 ],
2995
2996 "forwardPorts": [
2997 8082,
2998 8083,
2999 ],
3000 "appPort": "8084",
3001
3002 "containerEnv": {
3003 "VARIABLE_VALUE": "value",
3004 },
3005
3006 "initializeCommand": "touch IAM.md",
3007
3008 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3009
3010 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3011
3012 "postCreateCommand": {
3013 "yarn": "yarn install",
3014 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3015 },
3016
3017 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3018
3019 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3020
3021 "remoteUser": "node",
3022
3023 "remoteEnv": {
3024 "PATH": "${containerEnv:PATH}:/some/other/path",
3025 "OTHER_ENV": "other_env_value"
3026 },
3027
3028 "features": {
3029 "ghcr.io/devcontainers/features/docker-in-docker:2": {
3030 "moby": false,
3031 },
3032 "ghcr.io/devcontainers/features/go:1": {},
3033 },
3034
3035 "customizations": {
3036 "vscode": {
3037 "extensions": [
3038 "dbaeumer.vscode-eslint",
3039 "GitHub.vscode-pull-request-github",
3040 ],
3041 },
3042 "zed": {
3043 "extensions": ["vue", "ruby"],
3044 },
3045 "codespaces": {
3046 "repositories": {
3047 "devcontainers/features": {
3048 "permissions": {
3049 "contents": "write",
3050 "workflows": "write",
3051 },
3052 },
3053 },
3054 },
3055 },
3056 }
3057 "#;
3058
3059 let (test_dependencies, mut devcontainer_manifest) =
3060 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3061 .await
3062 .unwrap();
3063
3064 test_dependencies
3065 .fs
3066 .atomic_write(
3067 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3068 r#"
3069# Copyright (c) Microsoft Corporation. All rights reserved.
3070# Licensed under the MIT License. See License.txt in the project root for license information.
3071ARG VARIANT="16-bullseye"
3072FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3073
3074RUN mkdir -p /workspaces && chown node:node /workspaces
3075
3076ARG USERNAME=node
3077USER $USERNAME
3078
3079# Save command line history
3080RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3081&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3082&& mkdir -p /home/$USERNAME/commandhistory \
3083&& touch /home/$USERNAME/commandhistory/.bash_history \
3084&& chown -R $USERNAME /home/$USERNAME/commandhistory
3085 "#.trim().to_string(),
3086 )
3087 .await
3088 .unwrap();
3089
3090 devcontainer_manifest.parse_nonremote_vars().unwrap();
3091
3092 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3093
3094 assert_eq!(
3095 devcontainer_up.extension_ids,
3096 vec!["vue".to_string(), "ruby".to_string()]
3097 );
3098
3099 let files = test_dependencies.fs.files();
3100 let feature_dockerfile = files
3101 .iter()
3102 .find(|f| {
3103 f.file_name()
3104 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3105 })
3106 .expect("to be found");
3107 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3108 assert_eq!(
3109 &feature_dockerfile,
3110 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3111
3112# Copyright (c) Microsoft Corporation. All rights reserved.
3113# Licensed under the MIT License. See License.txt in the project root for license information.
3114ARG VARIANT="16-bullseye"
3115FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3116
3117RUN mkdir -p /workspaces && chown node:node /workspaces
3118
3119ARG USERNAME=node
3120USER $USERNAME
3121
3122# Save command line history
3123RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3124&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3125&& mkdir -p /home/$USERNAME/commandhistory \
3126&& touch /home/$USERNAME/commandhistory/.bash_history \
3127&& chown -R $USERNAME /home/$USERNAME/commandhistory
3128
3129FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3130USER root
3131COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3132RUN chmod -R 0755 /tmp/build-features/
3133
3134FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3135
3136USER root
3137
3138RUN mkdir -p /tmp/dev-container-features
3139COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3140
3141RUN \
3142echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3143echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3144
3145
3146RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3147cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3148&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3149&& cd /tmp/dev-container-features/docker-in-docker_0 \
3150&& chmod +x ./devcontainer-features-install.sh \
3151&& ./devcontainer-features-install.sh \
3152&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3153
3154RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3155cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3156&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3157&& cd /tmp/dev-container-features/go_1 \
3158&& chmod +x ./devcontainer-features-install.sh \
3159&& ./devcontainer-features-install.sh \
3160&& rm -rf /tmp/dev-container-features/go_1
3161
3162
3163ARG _DEV_CONTAINERS_IMAGE_USER=root
3164USER $_DEV_CONTAINERS_IMAGE_USER
3165"#
3166 );
3167
3168 let uid_dockerfile = files
3169 .iter()
3170 .find(|f| {
3171 f.file_name()
3172 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3173 })
3174 .expect("to be found");
3175 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3176
3177 assert_eq!(
3178 &uid_dockerfile,
3179 r#"ARG BASE_IMAGE
3180FROM $BASE_IMAGE
3181
3182USER root
3183
3184ARG REMOTE_USER
3185ARG NEW_UID
3186ARG NEW_GID
3187SHELL ["/bin/sh", "-c"]
3188RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3189 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3190 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3191 if [ -z "$OLD_UID" ]; then \
3192 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3193 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3194 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3195 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3196 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3197 else \
3198 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3199 FREE_GID=65532; \
3200 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3201 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3202 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3203 fi; \
3204 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3205 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3206 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3207 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3208 fi; \
3209 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3210 fi;
3211
3212ARG IMAGE_USER
3213USER $IMAGE_USER
3214
3215# Ensure that /etc/profile does not clobber the existing path
3216RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3217
3218ENV DOCKER_BUILDKIT=1
3219
3220ENV GOPATH=/go
3221ENV GOROOT=/usr/local/go
3222ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3223ENV VARIABLE_VALUE=value
3224"#
3225 );
3226
3227 let golang_install_wrapper = files
3228 .iter()
3229 .find(|f| {
3230 f.file_name()
3231 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3232 && f.to_str().is_some_and(|s| s.contains("/go_"))
3233 })
3234 .expect("to be found");
3235 let golang_install_wrapper = test_dependencies
3236 .fs
3237 .load(golang_install_wrapper)
3238 .await
3239 .unwrap();
3240 assert_eq!(
3241 &golang_install_wrapper,
3242 r#"#!/bin/sh
3243set -e
3244
3245on_exit () {
3246 [ $? -eq 0 ] && exit
3247 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3248}
3249
3250trap on_exit EXIT
3251
3252echo ===========================================================================
3253echo 'Feature : go'
3254echo 'Id : ghcr.io/devcontainers/features/go:1'
3255echo 'Options :'
3256echo ' GOLANGCILINTVERSION=latest
3257 VERSION=latest'
3258echo ===========================================================================
3259
3260set -a
3261. ../devcontainer-features.builtin.env
3262. ./devcontainer-features.env
3263set +a
3264
3265chmod +x ./install.sh
3266./install.sh
3267"#
3268 );
3269
3270 let docker_commands = test_dependencies
3271 .command_runner
3272 .commands_by_program("docker");
3273
3274 let docker_run_command = docker_commands
3275 .iter()
3276 .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3277 .expect("found");
3278
3279 assert_eq!(
3280 docker_run_command.args,
3281 vec![
3282 "run".to_string(),
3283 "--privileged".to_string(),
3284 "--sig-proxy=false".to_string(),
3285 "-d".to_string(),
3286 "--mount".to_string(),
3287 "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3288 "--mount".to_string(),
3289 "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3290 "--mount".to_string(),
3291 "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3292 "-l".to_string(),
3293 "devcontainer.local_folder=/path/to/local/project".to_string(),
3294 "-l".to_string(),
3295 "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3296 "-l".to_string(),
3297 "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3298 "-p".to_string(),
3299 "8082:8082".to_string(),
3300 "-p".to_string(),
3301 "8083:8083".to_string(),
3302 "-p".to_string(),
3303 "8084:8084".to_string(),
3304 "--entrypoint".to_string(),
3305 "/bin/sh".to_string(),
3306 "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3307 "-c".to_string(),
3308 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3309 "-".to_string()
3310 ]
3311 );
3312
3313 let docker_exec_commands = test_dependencies
3314 .docker
3315 .exec_commands_recorded
3316 .lock()
3317 .unwrap();
3318
3319 assert!(docker_exec_commands.iter().all(|exec| {
3320 exec.env
3321 == HashMap::from([
3322 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3323 (
3324 "PATH".to_string(),
3325 "/initial/path:/some/other/path".to_string(),
3326 ),
3327 ])
3328 }))
3329 }
3330
 // updateRemoteUserUID is treated as false in Windows, so this test will fail
 // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
 // Verifies the docker-compose path: the extended feature Dockerfile, the
 // UID-remapping Dockerfile, and the generated compose build/runtime overrides.
 #[cfg(not(target_os = "windows"))]
 #[gpui::test]
 async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 // Compose-based config: dev service is "app"; ports forward via the "db"
 // service because "app" runs with network_mode: service:db (see the YAML below).
 let given_devcontainer_contents = r#"
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
 {
 "features": {
 "ghcr.io/devcontainers/features/aws-cli:1": {},
 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
 },
 "name": "Rust and PostgreSQL",
 "dockerComposeFile": "docker-compose.yml",
 "service": "app",
 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

 // Features to add to the dev container. More info: https://containers.dev/features.
 // "features": {},

 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 "forwardPorts": [
 8083,
 "db:5432",
 "db:1234",
 ],
 "appPort": "8084",

 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "rustc --version",

 // Configure tool-specific properties.
 // "customizations": {},

 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
 // "remoteUser": "root"
 }
 "#;
 let (test_dependencies, mut devcontainer_manifest) =
 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
 .await
 .unwrap();

 // The compose file referenced by "dockerComposeFile" above, written into the fake fs.
 test_dependencies
 .fs
 .atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
 r#"
version: '3.8'

volumes:
 postgres-data:

services:
 app:
 build:
 context: .
 dockerfile: Dockerfile
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 volumes:
 - ../..:/workspaces:cached

 # Overrides default command so things don't shut down after the process ends.
 command: sleep infinity

 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
 network_mode: service:db

 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)

 db:
 image: postgres:14.1
 restart: unless-stopped
 volumes:
 - postgres-data:/var/lib/postgresql/data
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)
 "#.trim().to_string(),
 )
 .await
 .unwrap();

 // The Dockerfile the "app" service's build section points at.
 test_dependencies.fs.atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
 r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 && apt-get -y install clang lld \
 && apt-get autoremove -y && apt-get clean -y
 "#.trim().to_string()).await.unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

 // Feature build wraps the user Dockerfile with feature-install stages;
 // the expected output must match byte-for-byte.
 let files = test_dependencies.fs.files();
 let feature_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
 })
 .expect("to be found");
 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
 assert_eq!(
 &feature_dockerfile,
 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
 && apt-get -y install clang lld \
 && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
 );

 // updateRemoteUserUID is active (non-Windows), so a UID-remapping
 // Dockerfile must be generated for the compose path too.
 let uid_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
 })
 .expect("to be found");
 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

 assert_eq!(
 &uid_dockerfile,
 r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
 );

 // The generated compose build override must keep the service's original
 // build context ("."), not replace it.
 let build_override = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
 })
 .expect("to be found");
 let build_override = test_dependencies.fs.load(build_override).await.unwrap();
 let build_config: DockerComposeConfig =
 serde_json_lenient::from_str(&build_override).unwrap();
 let build_context = build_config
 .services
 .get("app")
 .and_then(|s| s.build.as_ref())
 .and_then(|b| b.context.clone())
 .expect("build override should have a context");
 assert_eq!(
 build_context, ".",
 "build override should preserve the original build context from docker-compose.yml"
 );

 // The runtime override: entrypoint/labels/DinD volume on "app", and all
 // forwarded + app ports published on "db" (app shares db's network).
 let runtime_override = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
 })
 .expect("to be found");
 let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

 let expected_runtime_override = DockerComposeConfig {
 name: None,
 services: HashMap::from([
 (
 "app".to_string(),
 DockerComposeService {
 entrypoint: Some(vec![
 "/bin/sh".to_string(),
 "-c".to_string(),
 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
 "-".to_string(),
 ]),
 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
 privileged: Some(true),
 labels: Some(HashMap::from([
 ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
 ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
 ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
 ])),
 volumes: vec![
 MountDefinition {
 source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
 target: "/var/lib/docker".to_string(),
 mount_type: Some("volume".to_string())
 }
 ],
 ..Default::default()
 },
 ),
 (
 "db".to_string(),
 DockerComposeService {
 ports: vec![
 DockerComposeServicePort {
 target: "8083".to_string(),
 published: "8083".to_string(),
 ..Default::default()
 },
 DockerComposeServicePort {
 target: "5432".to_string(),
 published: "5432".to_string(),
 ..Default::default()
 },
 DockerComposeServicePort {
 target: "1234".to_string(),
 published: "1234".to_string(),
 ..Default::default()
 },
 DockerComposeServicePort {
 target: "8084".to_string(),
 published: "8084".to_string(),
 ..Default::default()
 },
 ],
 ..Default::default()
 },
 ),
 ]),
 volumes: HashMap::from([(
 "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
 DockerComposeVolume {
 name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
 },
 )]),
 };

 assert_eq!(
 serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
 expected_runtime_override
 )
 }
3657
 // Same docker-compose scenario as test_spawns_devcontainer_with_docker_compose,
 // but with "updateRemoteUserUID": false — so no cfg gate is needed (runs on
 // Windows too) and no updateUID.Dockerfile stage is expected; the PATH-profile
 // fix and DOCKER_BUILDKIT env land directly in Dockerfile.extended instead.
 #[gpui::test]
 async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
 cx: &mut TestAppContext,
 ) {
 cx.executor().allow_parking();
 env_logger::try_init().ok();
 let given_devcontainer_contents = r#"
 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
 {
 "features": {
 "ghcr.io/devcontainers/features/aws-cli:1": {},
 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
 },
 "name": "Rust and PostgreSQL",
 "dockerComposeFile": "docker-compose.yml",
 "service": "app",
 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

 // Features to add to the dev container. More info: https://containers.dev/features.
 // "features": {},

 // Use 'forwardPorts' to make a list of ports inside the container available locally.
 "forwardPorts": [
 8083,
 "db:5432",
 "db:1234",
 ],
 "updateRemoteUserUID": false,
 "appPort": "8084",

 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "rustc --version",

 // Configure tool-specific properties.
 // "customizations": {},

 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
 // "remoteUser": "root"
 }
 "#;
 let (test_dependencies, mut devcontainer_manifest) =
 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
 .await
 .unwrap();

 // The compose file referenced by "dockerComposeFile" above, written into the fake fs.
 test_dependencies
 .fs
 .atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
 r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
 build:
 context: .
 dockerfile: Dockerfile
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 volumes:
 - ../..:/workspaces:cached

 # Overrides default command so things don't shut down after the process ends.
 command: sleep infinity

 # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
 network_mode: service:db

 # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
 image: postgres:14.1
 restart: unless-stopped
 volumes:
 - postgres-data:/var/lib/postgresql/data
 env_file:
 # Ensure that the variables in .env match the same variables in devcontainer.json
 - .env

 # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
 # (Adding the "ports" property to this file will not forward from a Codespace.)
 "#.trim().to_string(),
 )
 .await
 .unwrap();

 // The Dockerfile the "app" service's build section points at.
 test_dependencies.fs.atomic_write(
 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
 r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
 "#.trim().to_string()).await.unwrap();

 devcontainer_manifest.parse_nonremote_vars().unwrap();

 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

 // With UID updating disabled, the tail of Dockerfile.extended carries the
 // /etc/profile PATH fix and DOCKER_BUILDKIT env that would otherwise go into
 // updateUID.Dockerfile; the expected output must match byte-for-byte.
 let files = test_dependencies.fs.files();
 let feature_dockerfile = files
 .iter()
 .find(|f| {
 f.file_name()
 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
 })
 .expect("to be found");
 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
 assert_eq!(
 &feature_dockerfile,
 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
 );
 }
3833
    // Exercises the docker-compose + podman code path. Podman reports no compose
    // BuildKit support (see `FakeDocker::supports_compose_buildkit`), so the
    // generated Dockerfile.extended must stage feature installers through COPY
    // stages instead of `RUN --mount=type=bind` bind mounts.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            // "forwardPorts": [5432],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let mut fake_docker = FakeDocker::new();
        // Report the CLI as podman to force the non-BuildKit feature layout.
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Compose file referenced by `dockerComposeFile` above. `FakeDocker`
        // matches this file by path and returns a canned config, so the YAML
        // body itself is never parsed in this test.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
build:
  context: .
  dockerfile: Dockerfile
env_file:
  # Ensure that the variables in .env match the same variables in devcontainer.json
  - .env

volumes:
  - ../..:/workspaces:cached

# Overrides default command so things don't shut down after the process ends.
command: sleep infinity

# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
network_mode: service:db

# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)

db:
image: postgres:14.1
restart: unless-stopped
volumes:
  - postgres-data:/var/lib/postgresql/data
env_file:
  # Ensure that the variables in .env match the same variables in devcontainer.json
  - .env

# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Dockerfile referenced by the compose service's build section; its body
        // should be spliced verbatim into the generated Dockerfile.extended.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Golden file: user Dockerfile first, then the feature stages. With
        // podman each feature (aws-cli_0, docker-in-docker_1) is copied in via
        // a COPY --chown layer rather than a BuildKit bind mount.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Golden file for the UID-remapping Dockerfile; since docker-in-docker
        // is among the features, it ends with ENV DOCKER_BUILDKIT=1.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4059
4060 #[gpui::test]
4061 async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4062 cx.executor().allow_parking();
4063 env_logger::try_init().ok();
4064 let given_devcontainer_contents = r#"
4065 /*---------------------------------------------------------------------------------------------
4066 * Copyright (c) Microsoft Corporation. All rights reserved.
4067 * Licensed under the MIT License. See License.txt in the project root for license information.
4068 *--------------------------------------------------------------------------------------------*/
4069 {
4070 "name": "cli-${devcontainerId}",
4071 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4072 "build": {
4073 "dockerfile": "Dockerfile",
4074 "args": {
4075 "VARIANT": "18-bookworm",
4076 "FOO": "bar",
4077 },
4078 },
4079 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4080 "workspaceFolder": "/workspace2",
4081 "mounts": [
4082 // Keep command history across instances
4083 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4084 ],
4085
4086 "forwardPorts": [
4087 8082,
4088 8083,
4089 ],
4090 "appPort": "8084",
4091 "updateRemoteUserUID": false,
4092
4093 "containerEnv": {
4094 "VARIABLE_VALUE": "value",
4095 },
4096
4097 "initializeCommand": "touch IAM.md",
4098
4099 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4100
4101 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4102
4103 "postCreateCommand": {
4104 "yarn": "yarn install",
4105 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4106 },
4107
4108 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4109
4110 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4111
4112 "remoteUser": "node",
4113
4114 "remoteEnv": {
4115 "PATH": "${containerEnv:PATH}:/some/other/path",
4116 "OTHER_ENV": "other_env_value"
4117 },
4118
4119 "features": {
4120 "ghcr.io/devcontainers/features/docker-in-docker:2": {
4121 "moby": false,
4122 },
4123 "ghcr.io/devcontainers/features/go:1": {},
4124 },
4125
4126 "customizations": {
4127 "vscode": {
4128 "extensions": [
4129 "dbaeumer.vscode-eslint",
4130 "GitHub.vscode-pull-request-github",
4131 ],
4132 },
4133 "zed": {
4134 "extensions": ["vue", "ruby"],
4135 },
4136 "codespaces": {
4137 "repositories": {
4138 "devcontainers/features": {
4139 "permissions": {
4140 "contents": "write",
4141 "workflows": "write",
4142 },
4143 },
4144 },
4145 },
4146 },
4147 }
4148 "#;
4149
4150 let (test_dependencies, mut devcontainer_manifest) =
4151 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4152 .await
4153 .unwrap();
4154
4155 test_dependencies
4156 .fs
4157 .atomic_write(
4158 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4159 r#"
4160# Copyright (c) Microsoft Corporation. All rights reserved.
4161# Licensed under the MIT License. See License.txt in the project root for license information.
4162ARG VARIANT="16-bullseye"
4163FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
4164
4165RUN mkdir -p /workspaces && chown node:node /workspaces
4166
4167ARG USERNAME=node
4168USER $USERNAME
4169
4170# Save command line history
4171RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4172&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4173&& mkdir -p /home/$USERNAME/commandhistory \
4174&& touch /home/$USERNAME/commandhistory/.bash_history \
4175&& chown -R $USERNAME /home/$USERNAME/commandhistory
4176 "#.trim().to_string(),
4177 )
4178 .await
4179 .unwrap();
4180
4181 devcontainer_manifest.parse_nonremote_vars().unwrap();
4182
4183 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4184
4185 assert_eq!(
4186 devcontainer_up.extension_ids,
4187 vec!["vue".to_string(), "ruby".to_string()]
4188 );
4189
4190 let files = test_dependencies.fs.files();
4191 let feature_dockerfile = files
4192 .iter()
4193 .find(|f| {
4194 f.file_name()
4195 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4196 })
4197 .expect("to be found");
4198 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4199 assert_eq!(
4200 &feature_dockerfile,
4201 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4202
4203# Copyright (c) Microsoft Corporation. All rights reserved.
4204# Licensed under the MIT License. See License.txt in the project root for license information.
4205ARG VARIANT="16-bullseye"
4206FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
4207
4208RUN mkdir -p /workspaces && chown node:node /workspaces
4209
4210ARG USERNAME=node
4211USER $USERNAME
4212
4213# Save command line history
4214RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4215&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4216&& mkdir -p /home/$USERNAME/commandhistory \
4217&& touch /home/$USERNAME/commandhistory/.bash_history \
4218&& chown -R $USERNAME /home/$USERNAME/commandhistory
4219
4220FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4221USER root
4222COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4223RUN chmod -R 0755 /tmp/build-features/
4224
4225FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4226
4227USER root
4228
4229RUN mkdir -p /tmp/dev-container-features
4230COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4231
4232RUN \
4233echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4234echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4235
4236
4237RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4238cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4239&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4240&& cd /tmp/dev-container-features/docker-in-docker_0 \
4241&& chmod +x ./devcontainer-features-install.sh \
4242&& ./devcontainer-features-install.sh \
4243&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4244
4245RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4246cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4247&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4248&& cd /tmp/dev-container-features/go_1 \
4249&& chmod +x ./devcontainer-features-install.sh \
4250&& ./devcontainer-features-install.sh \
4251&& rm -rf /tmp/dev-container-features/go_1
4252
4253
4254ARG _DEV_CONTAINERS_IMAGE_USER=root
4255USER $_DEV_CONTAINERS_IMAGE_USER
4256
4257# Ensure that /etc/profile does not clobber the existing path
4258RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4259
4260ENV DOCKER_BUILDKIT=1
4261
4262ENV GOPATH=/go
4263ENV GOROOT=/usr/local/go
4264ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4265ENV VARIABLE_VALUE=value
4266"#
4267 );
4268
4269 let golang_install_wrapper = files
4270 .iter()
4271 .find(|f| {
4272 f.file_name()
4273 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4274 && f.to_str().is_some_and(|s| s.contains("go_"))
4275 })
4276 .expect("to be found");
4277 let golang_install_wrapper = test_dependencies
4278 .fs
4279 .load(golang_install_wrapper)
4280 .await
4281 .unwrap();
4282 assert_eq!(
4283 &golang_install_wrapper,
4284 r#"#!/bin/sh
4285set -e
4286
4287on_exit () {
4288 [ $? -eq 0 ] && exit
4289 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4290}
4291
4292trap on_exit EXIT
4293
4294echo ===========================================================================
4295echo 'Feature : go'
4296echo 'Id : ghcr.io/devcontainers/features/go:1'
4297echo 'Options :'
4298echo ' GOLANGCILINTVERSION=latest
4299 VERSION=latest'
4300echo ===========================================================================
4301
4302set -a
4303. ../devcontainer-features.builtin.env
4304. ./devcontainer-features.env
4305set +a
4306
4307chmod +x ./install.sh
4308./install.sh
4309"#
4310 );
4311
4312 let docker_commands = test_dependencies
4313 .command_runner
4314 .commands_by_program("docker");
4315
4316 let docker_run_command = docker_commands
4317 .iter()
4318 .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4319
4320 assert!(docker_run_command.is_some());
4321
4322 let docker_exec_commands = test_dependencies
4323 .docker
4324 .exec_commands_recorded
4325 .lock()
4326 .unwrap();
4327
4328 assert!(docker_exec_commands.iter().all(|exec| {
4329 exec.env
4330 == HashMap::from([
4331 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4332 (
4333 "PATH".to_string(),
4334 "/initial/path:/some/other/path".to_string(),
4335 ),
4336 ])
4337 }))
4338 }
4339
    // Minimal config: a plain `"image"` and no features. Only the
    // updateUID.Dockerfile should be generated, and (with no docker-in-docker
    // feature) it must not end with ENV DOCKER_BUILDKIT=1.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "image": "test_image:latest",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Golden file for the UID-remapping Dockerfile.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4414
    // Compose config whose service uses a plain image (no build section). As in
    // the plain-image test, only updateUID.Dockerfile is produced, without the
    // trailing ENV DOCKER_BUILDKIT=1 (no docker-in-docker feature).
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "dockerComposeFile": "docker-compose-plain.yml",
            "service": "app",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose file referenced above; `FakeDocker` matches it by path and
        // returns a canned config for the "app" service.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
  app:
    image: test_image:latest
    command: sleep infinity
    volumes:
      - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // Golden file for the UID-remapping Dockerfile.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4508
    /// A single `run_docker_exec` invocation captured by [`FakeDocker`].
    /// Only `env` is asserted on by the current tests; the underscore-prefixed
    /// fields are captured for completeness and future assertions.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4516
    /// Test double for [`DockerClient`]: returns canned inspect/compose fixtures,
    /// records exec invocations, and can emulate podman.
    pub(crate) struct FakeDocker {
        // Mutex so the `&self` async trait methods can record invocations.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, `docker_cli()` returns "podman" and
        // `supports_compose_buildkit()` returns false.
        podman: bool,
    }
4521
4522 impl FakeDocker {
4523 pub(crate) fn new() -> Self {
4524 Self {
4525 podman: false,
4526 exec_commands_recorded: Mutex::new(Vec::new()),
4527 }
4528 }
4529 #[cfg(not(target_os = "windows"))]
4530 fn set_podman(&mut self, podman: bool) {
4531 self.podman = podman;
4532 }
4533 }
4534
    #[async_trait]
    impl DockerClient for FakeDocker {
        // Returns canned inspect fixtures keyed by the image/container ids used
        // by the tests above; any other id reports docker as unavailable.
        // (`id: &String` is dictated by the trait signature.)
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Base image used by the Dockerfile-based tests (remoteUser=node).
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Base image used by the docker-compose tests (remoteUser=vscode).
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Ids starting with "cli_" — presumably derived from the
            // `"name": "cli-${devcontainerId}"` configs; note the
            // PATH=/initial/path env the remoteEnv expansion test relies on.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id returned by `find_process_by_filters` below; the
            // only fixture that reports a workspace bind mount.
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Ids for the "Rust and PostgreSQL" compose project containers.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Image used by the plain-image tests.
            if id == "test_image:latest" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }

            Err(DevContainerError::DockerNotAvailable)
        }
        // Matches the compose files written by the tests *by path* and returns a
        // canned parsed config; any other input reports docker as unavailable.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            // Fixture for .devcontainer/docker-compose.yml: app (built from a
            // Dockerfile, network_mode service:db) + db (postgres image).
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: Some("../..".to_string()),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: Some("postgres-data".to_string()),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            // Fixture for .devcontainer/docker-compose-plain.yml: a single
            // image-based service with no build section.
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        "app".to_string(),
                        DockerComposeService {
                            image: Some("test_image:latest".to_string()),
                            command: vec!["sleep".to_string(), "infinity".to_string()],
                            ..Default::default()
                        },
                    )]),
                    ..Default::default()
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        // Compose builds always "succeed" without doing anything.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        // Records the exec invocation (asserted on by the tests) and succeeds.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        // Always fails — no test currently restarts an existing container.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        // Always "finds" the container whose inspect fixture is keyed
        // "found_docker_ps" above.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        // Podman is treated as lacking compose BuildKit support.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
4774
    /// A program + argument list captured by [`TestCommandRunner`].
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
4780
    /// [`CommandRunner`] test double that records every command it is asked to
    /// run instead of spawning a process.
    pub(crate) struct TestCommandRunner {
        // Mutex so the `&self` async trait method can append records.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4784
4785 impl TestCommandRunner {
4786 fn new() -> Self {
4787 Self {
4788 commands_recorded: Mutex::new(Vec::new()),
4789 }
4790 }
4791
4792 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4793 let record = self.commands_recorded.lock().expect("poisoned");
4794 record
4795 .iter()
4796 .filter(|r| r.program == program)
4797 .map(|r| r.clone())
4798 .collect()
4799 }
4800 }
4801
    #[async_trait]
    impl CommandRunner for TestCommandRunner {
        // Records the program and args of `command`, then reports success with
        // empty stdout/stderr — nothing is actually executed.
        async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
            let mut record = self.commands_recorded.lock().expect("poisoned");

            record.push(TestCommand {
                program: command.get_program().display().to_string(),
                args: command
                    .get_args()
                    .map(|a| a.display().to_string())
                    .collect(),
            });

            // Pretend the command exited successfully with no output.
            Ok(Output {
                status: ExitStatus::default(),
                stdout: vec![],
                stderr: vec![],
            })
        }
    }
4822
4823 fn fake_http_client() -> Arc<dyn HttpClient> {
4824 FakeHttpClient::create(|request| async move {
4825 let (parts, _body) = request.into_parts();
4826 if parts.uri.path() == "/token" {
4827 let token_response = TokenResponse {
4828 token: "token".to_string(),
4829 };
4830 return Ok(http::Response::builder()
4831 .status(200)
4832 .body(http_client::AsyncBody::from(
4833 serde_json_lenient::to_string(&token_response).unwrap(),
4834 ))
4835 .unwrap());
4836 }
4837
4838 // OCI specific things
4839 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4840 let response = r#"
4841 {
4842 "schemaVersion": 2,
4843 "mediaType": "application/vnd.oci.image.manifest.v1+json",
4844 "config": {
4845 "mediaType": "application/vnd.devcontainers",
4846 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4847 "size": 2
4848 },
4849 "layers": [
4850 {
4851 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4852 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4853 "size": 59392,
4854 "annotations": {
4855 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4856 }
4857 }
4858 ],
4859 "annotations": {
4860 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4861 "com.github.package.type": "devcontainer_feature"
4862 }
4863 }
4864 "#;
4865 return Ok(http::Response::builder()
4866 .status(200)
4867 .body(http_client::AsyncBody::from(response))
4868 .unwrap());
4869 }
4870
4871 if parts.uri.path()
4872 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4873 {
4874 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4879 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4880 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4881 ```
4882 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4883 ```
4884 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4885
4886
4887 ## OS Support
4888
4889 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4890
4891 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4892
4893 `bash` is required to execute the `install.sh` script."#),
4894 ("./README.md", r#"
4895 # Docker (Docker-in-Docker) (docker-in-docker)
4896
4897 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4898
4899 ## Example Usage
4900
4901 ```json
4902 "features": {
4903 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4904 }
4905 ```
4906
4907 ## Options
4908
4909 | Options Id | Description | Type | Default Value |
4910 |-----|-----|-----|-----|
4911 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4912 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4913 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4914 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4915 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4916 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4917 | installDockerBuildx | Install Docker Buildx | boolean | true |
4918 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4919 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4920
4921 ## Customizations
4922
4923 ### VS Code Extensions
4924
4925 - `ms-azuretools.vscode-containers`
4926
4927 ## Limitations
4928
4929 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4930 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4931 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4932 ```
4933 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4934 ```
4935 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4936
4937
4938 ## OS Support
4939
4940 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4941
4942 `bash` is required to execute the `install.sh` script.
4943
4944
4945 ---
4946
4947 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
4948 ("./devcontainer-feature.json", r#"
4949 {
4950 "id": "docker-in-docker",
4951 "version": "2.16.1",
4952 "name": "Docker (Docker-in-Docker)",
4953 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4954 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4955 "options": {
4956 "version": {
4957 "type": "string",
4958 "proposals": [
4959 "latest",
4960 "none",
4961 "20.10"
4962 ],
4963 "default": "latest",
4964 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4965 },
4966 "moby": {
4967 "type": "boolean",
4968 "default": true,
4969 "description": "Install OSS Moby build instead of Docker CE"
4970 },
4971 "mobyBuildxVersion": {
4972 "type": "string",
4973 "default": "latest",
4974 "description": "Install a specific version of moby-buildx when using Moby"
4975 },
4976 "dockerDashComposeVersion": {
4977 "type": "string",
4978 "enum": [
4979 "none",
4980 "v1",
4981 "v2"
4982 ],
4983 "default": "v2",
4984 "description": "Default version of Docker Compose (v1, v2 or none)"
4985 },
4986 "azureDnsAutoDetection": {
4987 "type": "boolean",
4988 "default": true,
4989 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4990 },
4991 "dockerDefaultAddressPool": {
4992 "type": "string",
4993 "default": "",
4994 "proposals": [],
4995 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4996 },
4997 "installDockerBuildx": {
4998 "type": "boolean",
4999 "default": true,
5000 "description": "Install Docker Buildx"
5001 },
5002 "installDockerComposeSwitch": {
5003 "type": "boolean",
5004 "default": false,
5005 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5006 },
5007 "disableIp6tables": {
5008 "type": "boolean",
5009 "default": false,
5010 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5011 }
5012 },
5013 "entrypoint": "/usr/local/share/docker-init.sh",
5014 "privileged": true,
5015 "containerEnv": {
5016 "DOCKER_BUILDKIT": "1"
5017 },
5018 "customizations": {
5019 "vscode": {
5020 "extensions": [
5021 "ms-azuretools.vscode-containers"
5022 ],
5023 "settings": {
5024 "github.copilot.chat.codeGeneration.instructions": [
5025 {
5026 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5027 }
5028 ]
5029 }
5030 }
5031 },
5032 "mounts": [
5033 {
5034 "source": "dind-var-lib-docker-${devcontainerId}",
5035 "target": "/var/lib/docker",
5036 "type": "volume"
5037 }
5038 ],
5039 "installsAfter": [
5040 "ghcr.io/devcontainers/features/common-utils"
5041 ]
5042 }"#),
5043 ("./install.sh", r#"
5044 #!/usr/bin/env bash
5045 #-------------------------------------------------------------------------------------------------------------
5046 # Copyright (c) Microsoft Corporation. All rights reserved.
5047 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5048 #-------------------------------------------------------------------------------------------------------------
5049 #
5050 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5051 # Maintainer: The Dev Container spec maintainers
5052
5053
5054 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5055 USE_MOBY="${MOBY:-"true"}"
5056 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5057 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5058 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5059 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5060 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5061 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5062 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5063 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5064 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5065 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5066 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5067 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5068
5069 # Default: Exit on any failure.
5070 set -e
5071
5072 # Clean up
5073 rm -rf /var/lib/apt/lists/*
5074
5075 # Setup STDERR.
5076 err() {
5077 echo "(!) $*" >&2
5078 }
5079
5080 if [ "$(id -u)" -ne 0 ]; then
5081 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5082 exit 1
5083 fi
5084
5085 ###################
5086 # Helper Functions
5087 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5088 ###################
5089
5090 # Determine the appropriate non-root user
5091 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5092 USERNAME=""
5093 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5094 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5095 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5096 USERNAME=${CURRENT_USER}
5097 break
5098 fi
5099 done
5100 if [ "${USERNAME}" = "" ]; then
5101 USERNAME=root
5102 fi
5103 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5104 USERNAME=root
5105 fi
5106
5107 # Package manager update function
5108 pkg_mgr_update() {
5109 case ${ADJUSTED_ID} in
5110 debian)
5111 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5112 echo "Running apt-get update..."
5113 apt-get update -y
5114 fi
5115 ;;
5116 rhel)
5117 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5118 cache_check_dir="/var/cache/yum"
5119 else
5120 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5121 fi
5122 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5123 echo "Running ${PKG_MGR_CMD} makecache ..."
5124 ${PKG_MGR_CMD} makecache
5125 fi
5126 ;;
5127 esac
5128 }
5129
5130 # Checks if packages are installed and installs them if not
5131 check_packages() {
5132 case ${ADJUSTED_ID} in
5133 debian)
5134 if ! dpkg -s "$@" > /dev/null 2>&1; then
5135 pkg_mgr_update
5136 apt-get -y install --no-install-recommends "$@"
5137 fi
5138 ;;
5139 rhel)
5140 if ! rpm -q "$@" > /dev/null 2>&1; then
5141 pkg_mgr_update
5142 ${PKG_MGR_CMD} -y install "$@"
5143 fi
5144 ;;
5145 esac
5146 }
5147
5148 # Figure out correct version of a three part version number is not passed
5149 find_version_from_git_tags() {
5150 local variable_name=$1
5151 local requested_version=${!variable_name}
5152 if [ "${requested_version}" = "none" ]; then return; fi
5153 local repository=$2
5154 local prefix=${3:-"tags/v"}
5155 local separator=${4:-"."}
5156 local last_part_optional=${5:-"false"}
5157 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5158 local escaped_separator=${separator//./\\.}
5159 local last_part
5160 if [ "${last_part_optional}" = "true" ]; then
5161 last_part="(${escaped_separator}[0-9]+)?"
5162 else
5163 last_part="${escaped_separator}[0-9]+"
5164 fi
5165 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5166 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5167 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5168 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5169 else
5170 set +e
5171 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5172 set -e
5173 fi
5174 fi
5175 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5176 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5177 exit 1
5178 fi
5179 echo "${variable_name}=${!variable_name}"
5180 }
5181
5182 # Use semver logic to decrement a version number then look for the closest match
5183 find_prev_version_from_git_tags() {
5184 local variable_name=$1
5185 local current_version=${!variable_name}
5186 local repository=$2
5187 # Normally a "v" is used before the version number, but support alternate cases
5188 local prefix=${3:-"tags/v"}
5189 # Some repositories use "_" instead of "." for version number part separation, support that
5190 local separator=${4:-"."}
5191 # Some tools release versions that omit the last digit (e.g. go)
5192 local last_part_optional=${5:-"false"}
5193 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5194 local version_suffix_regex=$6
5195 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5196 set +e
5197 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5198 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5199 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5200
5201 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5202 ((major=major-1))
5203 declare -g ${variable_name}="${major}"
5204 # Look for latest version from previous major release
5205 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5206 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5207 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5208 ((minor=minor-1))
5209 declare -g ${variable_name}="${major}.${minor}"
5210 # Look for latest version from previous minor release
5211 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5212 else
5213 ((breakfix=breakfix-1))
5214 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5215 declare -g ${variable_name}="${major}.${minor}"
5216 else
5217 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5218 fi
5219 fi
5220 set -e
5221 }
5222
5223 # Function to fetch the version released prior to the latest version
5224 get_previous_version() {
5225 local url=$1
5226 local repo_url=$2
5227 local variable_name=$3
5228 prev_version=${!variable_name}
5229
5230 output=$(curl -s "$repo_url");
5231 if echo "$output" | jq -e 'type == "object"' > /dev/null; then
5232 message=$(echo "$output" | jq -r '.message')
5233
5234 if [[ $message == "API rate limit exceeded"* ]]; then
5235 echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
5236 echo -e "\nAttempting to find latest version using GitHub tags."
5237 find_prev_version_from_git_tags prev_version "$url" "tags/v"
5238 declare -g ${variable_name}="${prev_version}"
5239 fi
5240 elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
5241 echo -e "\nAttempting to find latest version using GitHub Api."
5242 version=$(echo "$output" | jq -r '.[1].tag_name')
5243 declare -g ${variable_name}="${version#v}"
5244 fi
5245 echo "${variable_name}=${!variable_name}"
5246 }
5247
5248 get_github_api_repo_url() {
5249 local url=$1
5250 echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5251 }
5252
5253 ###########################################
5254 # Start docker-in-docker installation
5255 ###########################################
5256
5257 # Ensure apt is in non-interactive to avoid prompts
5258 export DEBIAN_FRONTEND=noninteractive
5259
5260 # Source /etc/os-release to get OS info
5261 . /etc/os-release
5262
5263 # Determine adjusted ID and package manager
5264 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5265 ADJUSTED_ID="debian"
5266 PKG_MGR_CMD="apt-get"
5267 # Use dpkg for Debian-based systems
5268 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5269 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5270 ADJUSTED_ID="rhel"
5271 # Determine the appropriate package manager for RHEL-based systems
5272 for pkg_mgr in tdnf dnf microdnf yum; do
5273 if command -v "$pkg_mgr" >/dev/null 2>&1; then
5274 PKG_MGR_CMD="$pkg_mgr"
5275 break
5276 fi
5277 done
5278
5279 if [ -z "${PKG_MGR_CMD}" ]; then
5280 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5281 exit 1
5282 fi
5283
5284 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5285 else
5286 err "Linux distro ${ID} not supported."
5287 exit 1
5288 fi
5289
5290 # Azure Linux specific setup
5291 if [ "${ID}" = "azurelinux" ]; then
5292 VERSION_CODENAME="azurelinux${VERSION_ID}"
5293 fi
5294
5295 # Prevent attempting to install Moby on Debian trixie (packages removed)
5296 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5297 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5298 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5299 exit 1
5300 fi
5301
5302 # Check if distro is supported
5303 if [ "${USE_MOBY}" = "true" ]; then
5304 if [ "${ADJUSTED_ID}" = "debian" ]; then
5305 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5306 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5307 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5308 exit 1
5309 fi
5310 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5311 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5312 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5313 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5314 else
5315 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5316 fi
5317 fi
5318 else
5319 if [ "${ADJUSTED_ID}" = "debian" ]; then
5320 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5321 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5322 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5323 exit 1
5324 fi
5325 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5326 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5327
5328 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5329 fi
5330 fi
5331
5332 # Install base dependencies
5333 base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5334 case ${ADJUSTED_ID} in
5335 debian)
5336 check_packages apt-transport-https $base_packages dirmngr
5337 ;;
5338 rhel)
5339 check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
5340
5341 ;;
5342 esac
5343
5344 # Install git if not already present
5345 if ! command -v git >/dev/null 2>&1; then
5346 check_packages git
5347 fi
5348
5349 # Update CA certificates to ensure HTTPS connections work properly
5350 # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5351 # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5352 if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5353 update-ca-certificates
5354 fi
5355
5356 # Swap to legacy iptables for compatibility (Debian only)
5357 if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5358 update-alternatives --set iptables /usr/sbin/iptables-legacy
5359 update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5360 fi
5361
5362 # Set up the necessary repositories
5363 if [ "${USE_MOBY}" = "true" ]; then
5364 # Name of open source engine/cli
5365 engine_package_name="moby-engine"
5366 cli_package_name="moby-cli"
5367
5368 case ${ADJUSTED_ID} in
5369 debian)
5370 # Import key safely and import Microsoft apt repo
5371 {
5372 curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5373 curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5374 } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5375 echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5376 ;;
5377 rhel)
5378 echo "(*) ${ID} detected - checking for Moby packages..."
5379
5380 # Check if moby packages are available in default repos
5381 if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5382 echo "(*) Using built-in ${ID} Moby packages"
5383 else
5384 case "${ID}" in
5385 azurelinux)
5386 echo "(*) Moby packages not found in Azure Linux repositories"
5387 echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5388 err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5389 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5390 exit 1
5391 ;;
5392 mariner)
5393 echo "(*) Adding Microsoft repository for CBL-Mariner..."
5394 # Add Microsoft repository if packages aren't available locally
5395 curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5396 cat > /etc/yum.repos.d/microsoft.repo << EOF
5397 [microsoft]
5398 name=Microsoft Repository
5399 baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5400 enabled=1
5401 gpgcheck=1
5402 gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5403 EOF
5404 # Verify packages are available after adding repo
5405 pkg_mgr_update
5406 if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5407 echo "(*) Moby packages not found in Microsoft repository either"
5408 err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5409 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5410 exit 1
5411 fi
5412 ;;
5413 *)
5414 err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5415 exit 1
5416 ;;
5417 esac
5418 fi
5419 ;;
5420 esac
5421 else
5422 # Name of licensed engine/cli
5423 engine_package_name="docker-ce"
5424 cli_package_name="docker-ce-cli"
5425 case ${ADJUSTED_ID} in
5426 debian)
5427 curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5428 echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5429 ;;
5430 rhel)
5431 # Docker CE repository setup for RHEL-based systems
5432 setup_docker_ce_repo() {
5433 curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5434 cat > /etc/yum.repos.d/docker-ce.repo << EOF
5435 [docker-ce-stable]
5436 name=Docker CE Stable
5437 baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5438 enabled=1
5439 gpgcheck=1
5440 gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5441 skip_if_unavailable=1
5442 module_hotfixes=1
5443 EOF
5444 }
5445 install_azure_linux_deps() {
5446 echo "(*) Installing device-mapper libraries for Docker CE..."
5447 [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5448 echo "(*) Installing additional Docker CE dependencies..."
5449 ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5450 echo "(*) Some optional dependencies could not be installed, continuing..."
5451 }
5452 }
5453 setup_selinux_context() {
5454 if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5455 echo "(*) Creating minimal SELinux context for Docker compatibility..."
5456 mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5457 echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5458 fi
5459 }
5460
5461 # Special handling for RHEL Docker CE installation
5462 case "${ID}" in
5463 azurelinux|mariner)
5464 echo "(*) ${ID} detected"
5465 echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5466 echo "(*) Setting up Docker CE repository..."
5467
5468 setup_docker_ce_repo
5469 install_azure_linux_deps
5470
5471 if [ "${USE_MOBY}" != "true" ]; then
5472 echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5473 echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5474 setup_selinux_context
5475 else
5476 echo "(*) Using Moby - container-selinux not required"
5477 fi
5478 ;;
5479 *)
5480 # Standard RHEL/CentOS/Fedora approach
5481 if command -v dnf >/dev/null 2>&1; then
5482 dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5483 elif command -v yum-config-manager >/dev/null 2>&1; then
5484 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5485 else
5486 # Manual fallback
5487 setup_docker_ce_repo
5488 fi
5489 ;;
5490 esac
5491 ;;
5492 esac
5493 fi
5494
5495 # Refresh package database
5496 case ${ADJUSTED_ID} in
5497 debian)
5498 apt-get update
5499 ;;
5500 rhel)
5501 pkg_mgr_update
5502 ;;
5503 esac
5504
5505 # Soft version matching
5506 if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
5507 # Empty, meaning grab whatever "latest" is in apt repo
5508 engine_version_suffix=""
5509 cli_version_suffix=""
5510 else
5511 case ${ADJUSTED_ID} in
5512 debian)
5513 # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
5514 docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
5515 docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
5516 # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
5517 docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5518 set +e # Don't exit if finding version fails - will handle gracefully
5519 cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5520 engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5521 set -e
5522 if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
5523 err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5524 apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5525 exit 1
5526 fi
5527 ;;
5528 rhel)
5529 # For RHEL-based systems, use dnf/yum to find versions
5530 docker_version_escaped="${DOCKER_VERSION//./\\.}"
5531 set +e # Don't exit if finding version fails - will handle gracefully
5532 if [ "${USE_MOBY}" = "true" ]; then
5533 available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5534 else
5535 available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5536 fi
5537 set -e
5538 if [ -n "${available_versions}" ]; then
5539 engine_version_suffix="-${available_versions}"
5540 cli_version_suffix="-${available_versions}"
5541 else
5542 echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
5543 engine_version_suffix=""
5544 cli_version_suffix=""
5545 fi
5546 ;;
5547 esac
5548 fi
5549
5550 # Version matching for moby-buildx
5551 if [ "${USE_MOBY}" = "true" ]; then
5552 if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
5553 # Empty, meaning grab whatever "latest" is in apt repo
5554 buildx_version_suffix=""
5555 else
5556 case ${ADJUSTED_ID} in
5557 debian)
5558 buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5559 buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
5560 buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5561 set +e
5562 buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
5563 set -e
5564 if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
5565 err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5566 apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5567 exit 1
5568 fi
5569 ;;
5570 rhel)
5571 # For RHEL-based systems, try to find buildx version or use latest
5572 buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5573 set +e
5574 available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
5575 set -e
5576 if [ -n "${available_buildx}" ]; then
5577 buildx_version_suffix="-${available_buildx}"
5578 else
5579 echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
5580 buildx_version_suffix=""
5581 fi
5582 ;;
5583 esac
5584 echo "buildx_version_suffix ${buildx_version_suffix}"
5585 fi
5586 fi
5587
5588 # Install Docker / Moby CLI if not already installed
5589 if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5590 echo "Docker / Moby CLI and Engine already installed."
5591 else
5592 case ${ADJUSTED_ID} in
5593 debian)
5594 if [ "${USE_MOBY}" = "true" ]; then
5595 # Install engine
5596 set +e # Handle error gracefully
5597 apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5598 exit_code=$?
5599 set -e
5600
5601 if [ ${exit_code} -ne 0 ]; then
5602 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5603 exit 1
5604 fi
5605
5606 # Install compose
5607 apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5608 else
5609 apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5610 # Install compose
5611 apt-mark hold docker-ce docker-ce-cli
5612 apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5613 fi
5614 ;;
5615 rhel)
5616 if [ "${USE_MOBY}" = "true" ]; then
5617 set +e # Handle error gracefully
5618 ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5619 exit_code=$?
5620 set -e
5621
5622 if [ ${exit_code} -ne 0 ]; then
5623 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5624 exit 1
5625 fi
5626
5627 # Install compose
5628 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5629 ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5630 fi
5631 else
5632 # Special handling for Azure Linux Docker CE installation
5633 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5634 echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5635
5636 # Use rpm with --force and --nodeps for Azure Linux
5637 set +e # Don't exit on error for this section
5638 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5639 install_result=$?
5640 set -e
5641
5642 if [ $install_result -ne 0 ]; then
5643 echo "(*) Standard installation failed, trying manual installation..."
5644
5645 echo "(*) Standard installation failed, trying manual installation..."
5646
5647 # Create directory for downloading packages
5648 mkdir -p /tmp/docker-ce-install
5649
5650 # Download packages manually using curl since tdnf doesn't support download
5651 echo "(*) Downloading Docker CE packages manually..."
5652
5653 # Get the repository baseurl
5654 repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5655
5656 # Download packages directly
5657 cd /tmp/docker-ce-install
5658
5659 # Get package names with versions
5660 if [ -n "${cli_version_suffix}" ]; then
5661 docker_ce_version="${cli_version_suffix#-}"
5662 docker_cli_version="${engine_version_suffix#-}"
5663 else
5664 # Get latest version from repository
5665 docker_ce_version="latest"
5666 fi
5667
5668 echo "(*) Attempting to download Docker CE packages from repository..."
5669
5670 # Try to download latest packages if specific version fails
5671 if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5672 # Fallback: try to get latest available version
5673 echo "(*) Specific version not found, trying latest..."
5674 latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5675 latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5676 latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5677
5678 if [ -n "${latest_docker}" ]; then
5679 curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5680 curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5681 curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5682 else
5683 echo "(*) ERROR: Could not find Docker CE packages in repository"
5684 echo "(*) Please check repository configuration or use 'moby': true"
5685 exit 1
5686 fi
5687 fi
5688 # Install systemd libraries required by Docker CE
5689 echo "(*) Installing systemd libraries required by Docker CE..."
5690 ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5691 echo "(*) WARNING: Could not install systemd libraries"
5692 echo "(*) Docker may fail to start without these"
5693 }
5694
5695 # Install with rpm --force --nodeps
5696 echo "(*) Installing Docker CE packages with dependency override..."
5697 rpm -Uvh --force --nodeps *.rpm
5698
5699 # Cleanup
5700 cd /
5701 rm -rf /tmp/docker-ce-install
5702
5703 echo "(*) Docker CE installation completed with dependency bypass"
5704 echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5705 fi
5706 else
5707 # Standard installation for other RHEL-based systems
5708 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5709 fi
5710 # Install compose
5711 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5712 ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5713 fi
5714 fi
5715 ;;
5716 esac
5717 fi
5718
5719 echo "Finished installing docker / moby!"
5720
5721 docker_home="/usr/libexec/docker"
5722 cli_plugins_dir="${docker_home}/cli-plugins"
5723
5724 # fallback for docker-compose
5725 fallback_compose(){
5726 local url=$1
5727 local repo_url=$(get_github_api_repo_url "$url")
5728 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5729 get_previous_version "${url}" "${repo_url}" compose_version
5730 echo -e "\nAttempting to install v${compose_version}"
5731 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5732 }
5733
5734 # If 'docker-compose' command is to be included
5735 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5736 case "${architecture}" in
5737 amd64|x86_64) target_compose_arch=x86_64 ;;
5738 arm64|aarch64) target_compose_arch=aarch64 ;;
5739 *)
5740 echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5741 exit 1
5742 esac
5743
5744 docker_compose_path="/usr/local/bin/docker-compose"
5745 if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5746 err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5747 INSTALL_DOCKER_COMPOSE_SWITCH="false"
5748
5749 if [ "${target_compose_arch}" = "x86_64" ]; then
5750 echo "(*) Installing docker compose v1..."
5751 curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5752 chmod +x ${docker_compose_path}
5753
5754 # Download the SHA256 checksum
5755 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5756 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5757 sha256sum -c docker-compose.sha256sum --ignore-missing
5758 elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5759 err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5760 exit 1
5761 else
5762 # Use pip to get a version that runs on this architecture
5763 check_packages python3-minimal python3-pip libffi-dev python3-venv
5764 echo "(*) Installing docker compose v1 via pip..."
5765 export PYTHONUSERBASE=/usr/local
5766 pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5767 fi
5768 else
5769 compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5770 docker_compose_url="https://github.com/docker/compose"
5771 find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5772 echo "(*) Installing docker-compose ${compose_version}..."
5773 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5774 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5775 fallback_compose "$docker_compose_url"
5776 }
5777
5778 chmod +x ${docker_compose_path}
5779
5780 # Download the SHA256 checksum
5781 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5782 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5783 sha256sum -c docker-compose.sha256sum --ignore-missing
5784
5785 mkdir -p ${cli_plugins_dir}
5786 cp ${docker_compose_path} ${cli_plugins_dir}
5787 fi
5788 fi
5789
5790 # fallback method for compose-switch
5791 fallback_compose-switch() {
5792 local url=$1
5793 local repo_url=$(get_github_api_repo_url "$url")
5794 echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5795 get_previous_version "$url" "$repo_url" compose_switch_version
5796 echo -e "\nAttempting to install v${compose_switch_version}"
5797 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5798 }
5799 # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5800 if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5801 if type docker-compose > /dev/null 2>&1; then
5802 echo "(*) Installing compose-switch..."
5803 current_compose_path="$(command -v docker-compose)"
5804 target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5805 compose_switch_version="latest"
5806 compose_switch_url="https://github.com/docker/compose-switch"
5807 # Try to get latest version, fallback to known stable version if GitHub API fails
5808 set +e
5809 find_version_from_git_tags compose_switch_version "$compose_switch_url"
5810 if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5811 echo "(*) GitHub API rate limited or failed, using fallback method"
5812 fallback_compose-switch "$compose_switch_url"
5813 fi
5814 set -e
5815
5816 # Map architecture for compose-switch downloads
5817 case "${architecture}" in
5818 amd64|x86_64) target_switch_arch=amd64 ;;
5819 arm64|aarch64) target_switch_arch=arm64 ;;
5820 *) target_switch_arch=${architecture} ;;
5821 esac
5822 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5823 chmod +x /usr/local/bin/compose-switch
5824 # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5825 # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5826 mv "${current_compose_path}" "${target_compose_path}"
5827 update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5828 update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5829 else
5830 err "Skipping installation of compose-switch as docker compose is unavailable..."
5831 fi
5832 fi
5833
5834 # If init file already exists, exit
5835 if [ -f "/usr/local/share/docker-init.sh" ]; then
5836 echo "/usr/local/share/docker-init.sh already exists, so exiting."
5837 # Clean up
5838 rm -rf /var/lib/apt/lists/*
5839 exit 0
5840 fi
5841 echo "docker-init doesn't exist, adding..."
5842
5843 if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5844 groupadd -r docker
5845 fi
5846
5847 usermod -aG docker ${USERNAME}
5848
5849 # fallback for docker/buildx
5850 fallback_buildx() {
5851 local url=$1
5852 local repo_url=$(get_github_api_repo_url "$url")
5853 echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5854 get_previous_version "$url" "$repo_url" buildx_version
5855 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5856 echo -e "\nAttempting to install v${buildx_version}"
5857 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5858 }
5859
5860 if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5861 buildx_version="latest"
5862 docker_buildx_url="https://github.com/docker/buildx"
5863 find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5864 echo "(*) Installing buildx ${buildx_version}..."
5865
5866 # Map architecture for buildx downloads
5867 case "${architecture}" in
5868 amd64|x86_64) target_buildx_arch=amd64 ;;
5869 arm64|aarch64) target_buildx_arch=arm64 ;;
5870 *) target_buildx_arch=${architecture} ;;
5871 esac
5872
5873 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5874
5875 cd /tmp
5876 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5877
5878 docker_home="/usr/libexec/docker"
5879 cli_plugins_dir="${docker_home}/cli-plugins"
5880
5881 mkdir -p ${cli_plugins_dir}
5882 mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5883 chmod +x ${cli_plugins_dir}/docker-buildx
5884
5885 chown -R "${USERNAME}:docker" "${docker_home}"
5886 chmod -R g+r+w "${docker_home}"
5887 find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5888 fi
5889
5890 DOCKER_DEFAULT_IP6_TABLES=""
5891 if [ "$DISABLE_IP6_TABLES" == true ]; then
5892 requested_version=""
5893 # checking whether the version requested either is in semver format or just a number denoting the major version
5894 # and, extracting the major version number out of the two scenarios
5895 semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5896 if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5897 requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5898 elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5899 requested_version=$DOCKER_VERSION
5900 fi
5901 if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5902 DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5903 echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5904 fi
5905 fi
5906
5907 if [ ! -d /usr/local/share ]; then
5908 mkdir -p /usr/local/share
5909 fi
5910
5911 tee /usr/local/share/docker-init.sh > /dev/null \
5912 << EOF
5913 #!/bin/sh
5914 #-------------------------------------------------------------------------------------------------------------
5915 # Copyright (c) Microsoft Corporation. All rights reserved.
5916 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5917 #-------------------------------------------------------------------------------------------------------------
5918
5919 set -e
5920
5921 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5922 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5923 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5924 EOF
5925
5926 tee -a /usr/local/share/docker-init.sh > /dev/null \
5927 << 'EOF'
5928 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5929 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5930 find /run /var/run -iname 'docker*.pid' -delete || :
5931 find /run /var/run -iname 'container*.pid' -delete || :
5932
5933 # -- Start: dind wrapper script --
5934 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5935
5936 export container=docker
5937
5938 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5939 mount -t securityfs none /sys/kernel/security || {
5940 echo >&2 'Could not mount /sys/kernel/security.'
5941 echo >&2 'AppArmor detection and --privileged mode might break.'
5942 }
5943 fi
5944
5945 # Mount /tmp (conditionally)
5946 if ! mountpoint -q /tmp; then
5947 mount -t tmpfs none /tmp
5948 fi
5949
5950 set_cgroup_nesting()
5951 {
5952 # cgroup v2: enable nesting
5953 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5954 # move the processes from the root group to the /init group,
5955 # otherwise writing subtree_control fails with EBUSY.
5956 # An error during moving non-existent process (i.e., "cat") is ignored.
5957 mkdir -p /sys/fs/cgroup/init
5958 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5959 # enable controllers
5960 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5961 > /sys/fs/cgroup/cgroup.subtree_control
5962 fi
5963 }
5964
5965 # Set cgroup nesting, retrying if necessary
5966 retry_cgroup_nesting=0
5967
5968 until [ "${retry_cgroup_nesting}" -eq "5" ];
5969 do
5970 set +e
5971 set_cgroup_nesting
5972
5973 if [ $? -ne 0 ]; then
5974 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5975 else
5976 break
5977 fi
5978
5979 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5980 set -e
5981 done
5982
5983 # -- End: dind wrapper script --
5984
5985 # Handle DNS
5986 set +e
5987 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5988 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5989 then
5990 echo "Setting dockerd Azure DNS."
5991 CUSTOMDNS="--dns 168.63.129.16"
5992 else
5993 echo "Not setting dockerd DNS manually."
5994 CUSTOMDNS=""
5995 fi
5996 set -e
5997
5998 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5999 then
6000 DEFAULT_ADDRESS_POOL=""
6001 else
6002 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6003 fi
6004
6005 # Start docker/moby engine
6006 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6007 INNEREOF
6008 )"
6009
6010 sudo_if() {
6011 COMMAND="$*"
6012
6013 if [ "$(id -u)" -ne 0 ]; then
6014 sudo $COMMAND
6015 else
6016 $COMMAND
6017 fi
6018 }
6019
6020 retry_docker_start_count=0
6021 docker_ok="false"
6022
6023 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6024 do
6025 # Start using sudo if not invoked as root
6026 if [ "$(id -u)" -ne 0 ]; then
6027 sudo /bin/sh -c "${dockerd_start}"
6028 else
6029 eval "${dockerd_start}"
6030 fi
6031
6032 retry_count=0
6033 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6034 do
6035 sleep 1s
6036 set +e
6037 docker info > /dev/null 2>&1 && docker_ok="true"
6038 set -e
6039
6040 retry_count=`expr $retry_count + 1`
6041 done
6042
6043 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6044 echo "(*) Failed to start docker, retrying..."
6045 set +e
6046 sudo_if pkill dockerd
6047 sudo_if pkill containerd
6048 set -e
6049 fi
6050
6051 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6052 done
6053
6054 # Execute whatever commands were passed in (if any). This allows us
6055 # to set this script to ENTRYPOINT while still executing the default CMD.
6056 exec "$@"
6057 EOF
6058
6059 chmod +x /usr/local/share/docker-init.sh
6060 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6061
6062 # Clean up
6063 rm -rf /var/lib/apt/lists/*
6064
6065 echo 'docker-in-docker-debian script has completed!'"#),
6066 ]).await;
6067
6068 return Ok(http::Response::builder()
6069 .status(200)
6070 .body(AsyncBody::from(response))
6071 .unwrap());
6072 }
6073                    if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
                        // Mock OCI registry endpoint: serve a canned image manifest
                        // (schemaVersion 2) for the `go` dev-container feature, tag "1".
                        // The single tar layer's digest (sha256:eadd8a47...) matches the
                        // blob endpoint handled just below, and the
                        // "dev.containers.metadata" annotation embeds the feature's
                        // devcontainer-feature.json as an escaped JSON string — the same
                        // document the blob's tarball carries.
                        // NOTE(review): the fixture bytes appear digest-pinned by the
                        // code under test; do not reformat or edit this string.
6074                        let response = r#"
6075                        {
6076                            "schemaVersion": 2,
6077                            "mediaType": "application/vnd.oci.image.manifest.v1+json",
6078                            "config": {
6079                                "mediaType": "application/vnd.devcontainers",
6080                                "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6081                                "size": 2
6082                            },
6083                            "layers": [
6084                                {
6085                                    "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6086                                    "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6087                                    "size": 20992,
6088                                    "annotations": {
6089                                        "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6090                                    }
6091                                }
6092                            ],
6093                            "annotations": {
6094                                "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6095                                "com.github.package.type": "devcontainer_feature"
6096                            }
6097                        }
6098                        "#;

                        // Return the manifest with HTTP 200; `unwrap()` is acceptable
                        // here because the builder inputs are all static and valid.
6100                        return Ok(http::Response::builder()
6101                            .status(200)
6102                            .body(http_client::AsyncBody::from(response))
6103                            .unwrap());
6104                    }
6105 if parts.uri.path()
6106 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6107 {
6108 let response = build_tarball(vec![
6109 ("./devcontainer-feature.json", r#"
6110 {
6111 "id": "go",
6112 "version": "1.3.3",
6113 "name": "Go",
6114 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6115 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6116 "options": {
6117 "version": {
6118 "type": "string",
6119 "proposals": [
6120 "latest",
6121 "none",
6122 "1.24",
6123 "1.23"
6124 ],
6125 "default": "latest",
6126 "description": "Select or enter a Go version to install"
6127 },
6128 "golangciLintVersion": {
6129 "type": "string",
6130 "default": "latest",
6131 "description": "Version of golangci-lint to install"
6132 }
6133 },
6134 "init": true,
6135 "customizations": {
6136 "vscode": {
6137 "extensions": [
6138 "golang.Go"
6139 ],
6140 "settings": {
6141 "github.copilot.chat.codeGeneration.instructions": [
6142 {
6143 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6144 }
6145 ]
6146 }
6147 }
6148 },
6149 "containerEnv": {
6150 "GOROOT": "/usr/local/go",
6151 "GOPATH": "/go",
6152 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6153 },
6154 "capAdd": [
6155 "SYS_PTRACE"
6156 ],
6157 "securityOpt": [
6158 "seccomp=unconfined"
6159 ],
6160 "installsAfter": [
6161 "ghcr.io/devcontainers/features/common-utils"
6162 ]
6163 }
6164 "#),
6165 ("./install.sh", r#"
6166 #!/usr/bin/env bash
6167 #-------------------------------------------------------------------------------------------------------------
6168 # Copyright (c) Microsoft Corporation. All rights reserved.
6169 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6170 #-------------------------------------------------------------------------------------------------------------
6171 #
6172 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6173 # Maintainer: The VS Code and Codespaces Teams
6174
6175 TARGET_GO_VERSION="${VERSION:-"latest"}"
6176 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6177
6178 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6179 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6180 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6181 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6182
6183 # https://www.google.com/linuxrepositories/
6184 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6185
6186 set -e
6187
6188 if [ "$(id -u)" -ne 0 ]; then
6189 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6190 exit 1
6191 fi
6192
6193 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6194 . /etc/os-release
6195 # Get an adjusted ID independent of distro variants
6196 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6197 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6198 ADJUSTED_ID="debian"
6199 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6200 ADJUSTED_ID="rhel"
6201 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6202 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6203 else
6204 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6205 fi
6206 else
6207 echo "Linux distro ${ID} not supported."
6208 exit 1
6209 fi
6210
6211 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6212 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6213 # Update the repo files to reference vault.centos.org.
6214 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6215 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6216 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6217 fi
6218
6219 # Setup INSTALL_CMD & PKG_MGR_CMD
6220 if type apt-get > /dev/null 2>&1; then
6221 PKG_MGR_CMD=apt-get
6222 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6223 elif type microdnf > /dev/null 2>&1; then
6224 PKG_MGR_CMD=microdnf
6225 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6226 elif type dnf > /dev/null 2>&1; then
6227 PKG_MGR_CMD=dnf
6228 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6229 else
6230 PKG_MGR_CMD=yum
6231 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6232 fi
6233
6234 # Clean up
6235 clean_up() {
6236 case ${ADJUSTED_ID} in
6237 debian)
6238 rm -rf /var/lib/apt/lists/*
6239 ;;
6240 rhel)
6241 rm -rf /var/cache/dnf/* /var/cache/yum/*
6242 rm -rf /tmp/yum.log
6243 rm -rf ${GPG_INSTALL_PATH}
6244 ;;
6245 esac
6246 }
6247 clean_up
6248
6249
6250 # Figure out correct version of a three part version number is not passed
6251 find_version_from_git_tags() {
6252 local variable_name=$1
6253 local requested_version=${!variable_name}
6254 if [ "${requested_version}" = "none" ]; then return; fi
6255 local repository=$2
6256 local prefix=${3:-"tags/v"}
6257 local separator=${4:-"."}
6258 local last_part_optional=${5:-"false"}
6259 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6260 local escaped_separator=${separator//./\\.}
6261 local last_part
6262 if [ "${last_part_optional}" = "true" ]; then
6263 last_part="(${escaped_separator}[0-9]+)?"
6264 else
6265 last_part="${escaped_separator}[0-9]+"
6266 fi
6267 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6268 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6269 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6270 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6271 else
6272 set +e
6273 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6274 set -e
6275 fi
6276 fi
6277 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6278 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6279 exit 1
6280 fi
6281 echo "${variable_name}=${!variable_name}"
6282 }
6283
6284 pkg_mgr_update() {
6285 case $ADJUSTED_ID in
6286 debian)
6287 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6288 echo "Running apt-get update..."
6289 ${PKG_MGR_CMD} update -y
6290 fi
6291 ;;
6292 rhel)
6293 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6294 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6295 echo "Running ${PKG_MGR_CMD} makecache ..."
6296 ${PKG_MGR_CMD} makecache
6297 fi
6298 else
6299 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6300 echo "Running ${PKG_MGR_CMD} check-update ..."
6301 set +e
6302 ${PKG_MGR_CMD} check-update
6303 rc=$?
6304 if [ $rc != 0 ] && [ $rc != 100 ]; then
6305 exit 1
6306 fi
6307 set -e
6308 fi
6309 fi
6310 ;;
6311 esac
6312 }
6313
6314 # Checks if packages are installed and installs them if not
6315 check_packages() {
6316 case ${ADJUSTED_ID} in
6317 debian)
6318 if ! dpkg -s "$@" > /dev/null 2>&1; then
6319 pkg_mgr_update
6320 ${INSTALL_CMD} "$@"
6321 fi
6322 ;;
6323 rhel)
6324 if ! rpm -q "$@" > /dev/null 2>&1; then
6325 pkg_mgr_update
6326 ${INSTALL_CMD} "$@"
6327 fi
6328 ;;
6329 esac
6330 }
6331
6332 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6333 rm -f /etc/profile.d/00-restore-env.sh
6334 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6335 chmod +x /etc/profile.d/00-restore-env.sh
6336
6337 # Some distributions do not install awk by default (e.g. Mariner)
6338 if ! type awk >/dev/null 2>&1; then
6339 check_packages awk
6340 fi
6341
6342 # Determine the appropriate non-root user
6343 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6344 USERNAME=""
6345 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6346 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6347 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6348 USERNAME=${CURRENT_USER}
6349 break
6350 fi
6351 done
6352 if [ "${USERNAME}" = "" ]; then
6353 USERNAME=root
6354 fi
6355 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6356 USERNAME=root
6357 fi
6358
6359 export DEBIAN_FRONTEND=noninteractive
6360
6361 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6362
6363 if [ $ADJUSTED_ID = "debian" ]; then
6364 check_packages g++ libc6-dev
6365 else
6366 check_packages gcc-c++ glibc-devel
6367 fi
6368 # Install curl, git, other dependencies if missing
6369 if ! type curl > /dev/null 2>&1; then
6370 check_packages curl
6371 fi
6372 if ! type git > /dev/null 2>&1; then
6373 check_packages git
6374 fi
6375 # Some systems, e.g. Mariner, still a few more packages
6376 if ! type as > /dev/null 2>&1; then
6377 check_packages binutils
6378 fi
6379 if ! [ -f /usr/include/linux/errno.h ]; then
6380 check_packages kernel-headers
6381 fi
6382 # Minimal RHEL install may need findutils installed
6383 if ! [ -f /usr/bin/find ]; then
6384 check_packages findutils
6385 fi
6386
6387 # Get closest match for version number specified
6388 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6389
6390 architecture="$(uname -m)"
6391 case $architecture in
6392 x86_64) architecture="amd64";;
6393 aarch64 | armv8*) architecture="arm64";;
6394 aarch32 | armv7* | armvhf*) architecture="armv6l";;
6395 i?86) architecture="386";;
6396 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6397 esac
6398
6399 # Install Go
6400 umask 0002
6401 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6402 groupadd -r golang
6403 fi
6404 usermod -a -G golang "${USERNAME}"
6405 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6406
6407 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6408 # Use a temporary location for gpg keys to avoid polluting image
6409 export GNUPGHOME="/tmp/tmp-gnupg"
6410 mkdir -p ${GNUPGHOME}
6411 chmod 700 ${GNUPGHOME}
6412 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6413 gpg -q --import /tmp/tmp-gnupg/golang_key
6414 echo "Downloading Go ${TARGET_GO_VERSION}..."
6415 set +e
6416 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6417 exit_code=$?
6418 set -e
6419 if [ "$exit_code" != "0" ]; then
6420 echo "(!) Download failed."
6421 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6422 set +e
6423 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6424 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6425 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6426 # Handle Go's odd version pattern where "0" releases omit the last part
6427 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6428 ((minor=minor-1))
6429 TARGET_GO_VERSION="${major}.${minor}"
6430 # Look for latest version from previous minor release
6431 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6432 else
6433 ((breakfix=breakfix-1))
6434 if [ "${breakfix}" = "0" ]; then
6435 TARGET_GO_VERSION="${major}.${minor}"
6436 else
6437 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6438 fi
6439 fi
6440 set -e
6441 echo "Trying ${TARGET_GO_VERSION}..."
6442 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6443 fi
6444 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6445 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6446 echo "Extracting Go ${TARGET_GO_VERSION}..."
6447 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6448 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6449 else
6450 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6451 fi
6452
6453 # Install Go tools that are isImportant && !replacedByGopls based on
6454 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6455 GO_TOOLS="\
6456 golang.org/x/tools/gopls@latest \
6457 honnef.co/go/tools/cmd/staticcheck@latest \
6458 golang.org/x/lint/golint@latest \
6459 github.com/mgechev/revive@latest \
6460 github.com/go-delve/delve/cmd/dlv@latest \
6461 github.com/fatih/gomodifytags@latest \
6462 github.com/haya14busa/goplay/cmd/goplay@latest \
6463 github.com/cweill/gotests/gotests@latest \
6464 github.com/josharian/impl@latest"
6465
6466 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6467 echo "Installing common Go tools..."
6468 export PATH=${TARGET_GOROOT}/bin:${PATH}
6469 export GOPATH=/tmp/gotools
6470 export GOCACHE="${GOPATH}/cache"
6471
6472 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6473 cd "${GOPATH}"
6474
6475 # Use go get for versions of go under 1.16
6476 go_install_command=install
6477 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6478 export GO111MODULE=on
6479 go_install_command=get
6480 echo "Go version < 1.16, using go get."
6481 fi
6482
6483 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6484
6485 # Move Go tools into path
6486 if [ -d "${GOPATH}/bin" ]; then
6487 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6488 fi
6489
6490 # Install golangci-lint from precompiled binaries
6491 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6492 echo "Installing golangci-lint latest..."
6493 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6494 sh -s -- -b "${TARGET_GOPATH}/bin"
6495 else
6496 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6497 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6498 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6499 fi
6500
6501 # Remove Go tools temp directory
6502 rm -rf "${GOPATH}"
6503 fi
6504
6505
6506 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6507 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6508 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6509 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6510
6511 # Clean up
6512 clean_up
6513
6514 echo "Done!"
6515 "#),
6516 ])
6517 .await;
6518 return Ok(http::Response::builder()
6519 .status(200)
6520 .body(AsyncBody::from(response))
6521 .unwrap());
6522 }
6523 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6524 let response = r#"
6525 {
6526 "schemaVersion": 2,
6527 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6528 "config": {
6529 "mediaType": "application/vnd.devcontainers",
6530 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6531 "size": 2
6532 },
6533 "layers": [
6534 {
6535 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6536 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6537 "size": 19968,
6538 "annotations": {
6539 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6540 }
6541 }
6542 ],
6543 "annotations": {
6544 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6545 "com.github.package.type": "devcontainer_feature"
6546 }
6547 }"#;
6548 return Ok(http::Response::builder()
6549 .status(200)
6550 .body(AsyncBody::from(response))
6551 .unwrap());
6552 }
6553 if parts.uri.path()
6554 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6555 {
6556 let response = build_tarball(vec![
6557 (
6558 "./devcontainer-feature.json",
6559 r#"
6560{
6561 "id": "aws-cli",
6562 "version": "1.1.3",
6563 "name": "AWS CLI",
6564 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6565 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6566 "options": {
6567 "version": {
6568 "type": "string",
6569 "proposals": [
6570 "latest"
6571 ],
6572 "default": "latest",
6573 "description": "Select or enter an AWS CLI version."
6574 },
6575 "verbose": {
6576 "type": "boolean",
6577 "default": true,
6578 "description": "Suppress verbose output."
6579 }
6580 },
6581 "customizations": {
6582 "vscode": {
6583 "extensions": [
6584 "AmazonWebServices.aws-toolkit-vscode"
6585 ],
6586 "settings": {
6587 "github.copilot.chat.codeGeneration.instructions": [
6588 {
6589 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6590 }
6591 ]
6592 }
6593 }
6594 },
6595 "installsAfter": [
6596 "ghcr.io/devcontainers/features/common-utils"
6597 ]
6598}
6599 "#,
6600 ),
6601 (
6602 "./install.sh",
6603 r#"#!/usr/bin/env bash
6604 #-------------------------------------------------------------------------------------------------------------
6605 # Copyright (c) Microsoft Corporation. All rights reserved.
6606 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6607 #-------------------------------------------------------------------------------------------------------------
6608 #
6609 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6610 # Maintainer: The VS Code and Codespaces Teams
6611
6612 set -e
6613
6614 # Clean up
6615 rm -rf /var/lib/apt/lists/*
6616
6617 VERSION=${VERSION:-"latest"}
6618 VERBOSE=${VERBOSE:-"true"}
6619
6620 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6621 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6622
6623 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6624 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6625 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6626 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6627 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6628 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6629 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6630 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6631 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6632 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6633 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6634 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6635 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6636 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6637 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6638 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6639 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6640 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6641 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6642 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6643 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6644 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6645 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6646 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6647 YLZATHZKTJyiqA==
6648 =vYOk
6649 -----END PGP PUBLIC KEY BLOCK-----"
6650
6651 if [ "$(id -u)" -ne 0 ]; then
6652 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6653 exit 1
6654 fi
6655
6656 apt_get_update()
6657 {
6658 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6659 echo "Running apt-get update..."
6660 apt-get update -y
6661 fi
6662 }
6663
6664 # Checks if packages are installed and installs them if not
6665 check_packages() {
6666 if ! dpkg -s "$@" > /dev/null 2>&1; then
6667 apt_get_update
6668 apt-get -y install --no-install-recommends "$@"
6669 fi
6670 }
6671
6672 export DEBIAN_FRONTEND=noninteractive
6673
6674 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6675
6676 verify_aws_cli_gpg_signature() {
6677 local filePath=$1
6678 local sigFilePath=$2
6679 local awsGpgKeyring=aws-cli-public-key.gpg
6680
6681 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6682 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6683 local status=$?
6684
6685 rm "./${awsGpgKeyring}"
6686
6687 return ${status}
6688 }
6689
6690 install() {
6691 local scriptZipFile=awscli.zip
6692 local scriptSigFile=awscli.sig
6693
6694 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6695 if [ "${VERSION}" != "latest" ]; then
6696 local versionStr=-${VERSION}
6697 fi
6698 architecture=$(dpkg --print-architecture)
6699 case "${architecture}" in
6700 amd64) architectureStr=x86_64 ;;
6701 arm64) architectureStr=aarch64 ;;
6702 *)
6703 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6704 exit 1
6705 esac
6706 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6707 curl "${scriptUrl}" -o "${scriptZipFile}"
6708 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6709
6710 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6711 if (( $? > 0 )); then
6712 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6713 exit 1
6714 fi
6715
6716 if [ "${VERBOSE}" = "false" ]; then
6717 unzip -q "${scriptZipFile}"
6718 else
6719 unzip "${scriptZipFile}"
6720 fi
6721
6722 ./aws/install
6723
6724 # kubectl bash completion
6725 mkdir -p /etc/bash_completion.d
6726 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6727
6728 # kubectl zsh completion
6729 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6730 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6731 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6732 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6733 fi
6734
6735 rm -rf ./aws
6736 }
6737
6738 echo "(*) Installing AWS CLI..."
6739
6740 install
6741
6742 # Clean up
6743 rm -rf /var/lib/apt/lists/*
6744
6745 echo "Done!""#,
6746 ),
6747 ("./scripts/", r#""#),
6748 (
6749 "./scripts/fetch-latest-completer-scripts.sh",
6750 r#"
6751 #!/bin/bash
6752 #-------------------------------------------------------------------------------------------------------------
6753 # Copyright (c) Microsoft Corporation. All rights reserved.
6754 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6755 #-------------------------------------------------------------------------------------------------------------
6756 #
6757 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6758 # Maintainer: The Dev Container spec maintainers
6759 #
6760 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6761 #
6762 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6763 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6764 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6765
6766 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6767 chmod +x "$BASH_COMPLETER_SCRIPT"
6768
6769 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6770 chmod +x "$ZSH_COMPLETER_SCRIPT"
6771 "#,
6772 ),
6773 ("./scripts/vendor/", r#""#),
6774 (
6775 "./scripts/vendor/aws_bash_completer",
6776 r#"
6777 # Typically that would be added under one of the following paths:
6778 # - /etc/bash_completion.d
6779 # - /usr/local/etc/bash_completion.d
6780 # - /usr/share/bash-completion/completions
6781
6782 complete -C aws_completer aws
6783 "#,
6784 ),
6785 (
6786 "./scripts/vendor/aws_zsh_completer.sh",
6787 r#"
6788 # Source this file to activate auto completion for zsh using the bash
6789 # compatibility helper. Make sure to run `compinit` before, which should be
6790 # given usually.
6791 #
6792 # % source /path/to/zsh_complete.sh
6793 #
6794 # Typically that would be called somewhere in your .zshrc.
6795 #
6796 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6797 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6798 #
6799 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6800 #
6801 # zsh releases prior to that version do not export the required env variables!
6802
6803 autoload -Uz bashcompinit
6804 bashcompinit -i
6805
6806 _bash_complete() {
6807 local ret=1
6808 local -a suf matches
6809 local -x COMP_POINT COMP_CWORD
6810 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6811 local -x COMP_LINE="$words"
6812 local -A savejobstates savejobtexts
6813
6814 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6815 (( COMP_CWORD = CURRENT - 1))
6816 COMP_WORDS=( $words )
6817 BASH_VERSINFO=( 2 05b 0 1 release )
6818
6819 savejobstates=( ${(kv)jobstates} )
6820 savejobtexts=( ${(kv)jobtexts} )
6821
6822 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6823
6824 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6825
6826 if [[ -n $matches ]]; then
6827 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6828 compset -P '*/' && matches=( ${matches##*/} )
6829 compset -S '/*' && matches=( ${matches%%/*} )
6830 compadd -Q -f "${suf[@]}" -a matches && ret=0
6831 else
6832 compadd -Q "${suf[@]}" -a matches && ret=0
6833 fi
6834 fi
6835
6836 if (( ret )); then
6837 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6838 _default "${suf[@]}" && ret=0
6839 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6840 _directories "${suf[@]}" && ret=0
6841 fi
6842 fi
6843
6844 return ret
6845 }
6846
6847 complete -C aws_completer aws
6848 "#,
6849 ),
6850 ]).await;
6851
6852 return Ok(http::Response::builder()
6853 .status(200)
6854 .body(AsyncBody::from(response))
6855 .unwrap());
6856 }
6857
6858 Ok(http::Response::builder()
6859 .status(404)
6860 .body(http_client::AsyncBody::default())
6861 .unwrap())
6862 })
6863 }
6864}