1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use fs::Fs;
10use http_client::HttpClient;
11use util::{ResultExt, command::Command};
12
13use crate::{
14 DevContainerConfig, DevContainerContext,
15 command_json::{CommandRunner, DefaultCommandRunner},
16 devcontainer_api::{DevContainerError, DevContainerUp},
17 devcontainer_json::{
18 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
19 deserialize_devcontainer_json,
20 },
21 docker::{
22 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
23 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
24 get_remote_dir_from_config,
25 },
26 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
27 get_oci_token,
28 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
29 safe_id_lower,
30};
31
/// Tracks how far the devcontainer config has progressed through the load
/// pipeline: the raw JSON is deserialized once up front, then re-parsed
/// after `${...}` variable substitution (see `parse_nonremote_vars`).
enum ConfigStatus {
    // Deserialized directly from the raw JSON; `${...}` variables unexpanded.
    Deserialized(DevContainer),
    // Re-deserialized after non-remote variable substitution.
    VariableParsed(DevContainer),
}
36
/// The docker-compose files involved in a build (base files plus any
/// generated override files) together with the merged configuration.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Paths passed to `docker compose -f ...`, in precedence order; override
    // files generated by this module are appended to the end.
    files: Vec<PathBuf>,
    // The merged compose configuration for those files.
    config: DockerComposeConfig,
}
42
/// Orchestrates bringing up a dev container from a devcontainer.json:
/// variable expansion, feature download, image/compose builds, and run.
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // The devcontainer.json text exactly as read from disk; re-parsed after
    // variable substitution.
    raw_config: String,
    // Current stage of the config in the load pipeline.
    config: ConfigStatus,
    // Host environment, used to expand `${localEnv:VAR}` substitutions.
    local_environment: HashMap<String, String>,
    // Project root on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer.json file.
    config_directory: PathBuf,
    // File name of the devcontainer.json within `config_directory`.
    file_name: String,
    // Inspection of the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths and tag for the features-extended image build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Feature manifests downloaded from OCI registries, in install order.
    features: Vec<FeatureManifest>,
}
// Default parent directory for the project inside the container when the
// config does not specify a workspace folder.
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
59impl DevContainerManifest {
60 async fn new(
61 context: &DevContainerContext,
62 environment: HashMap<String, String>,
63 docker_client: Arc<dyn DockerClient>,
64 command_runner: Arc<dyn CommandRunner>,
65 local_config: DevContainerConfig,
66 local_project_path: &Path,
67 ) -> Result<Self, DevContainerError> {
68 let config_path = local_project_path.join(local_config.config_path.clone());
69 log::debug!("parsing devcontainer json found in {:?}", &config_path);
70 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
71 log::error!("Unable to read devcontainer contents: {e}");
72 DevContainerError::DevContainerParseFailed
73 })?;
74
75 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
76
77 let devcontainer_directory = config_path.parent().ok_or_else(|| {
78 log::error!("Dev container file should be in a directory");
79 DevContainerError::NotInValidProject
80 })?;
81 let file_name = config_path
82 .file_name()
83 .and_then(|f| f.to_str())
84 .ok_or_else(|| {
85 log::error!("Dev container file has no file name, or is invalid unicode");
86 DevContainerError::DevContainerParseFailed
87 })?;
88
89 Ok(Self {
90 fs: context.fs.clone(),
91 http_client: context.http_client.clone(),
92 docker_client,
93 command_runner,
94 raw_config: devcontainer_contents,
95 config: ConfigStatus::Deserialized(devcontainer),
96 local_project_directory: local_project_path.to_path_buf(),
97 local_environment: environment,
98 config_directory: devcontainer_directory.to_path_buf(),
99 file_name: file_name.to_string(),
100 root_image: None,
101 features_build_info: None,
102 features: Vec::new(),
103 })
104 }
105
106 fn devcontainer_id(&self) -> String {
107 let mut labels = self.identifying_labels();
108 labels.sort_by_key(|(key, _)| *key);
109
110 let mut hasher = DefaultHasher::new();
111 for (key, value) in &labels {
112 key.hash(&mut hasher);
113 value.hash(&mut hasher);
114 }
115
116 format!("{:016x}", hasher.finish())
117 }
118
119 fn identifying_labels(&self) -> Vec<(&str, String)> {
120 let labels = vec![
121 (
122 "devcontainer.local_folder",
123 (self.local_project_directory.display()).to_string(),
124 ),
125 (
126 "devcontainer.config_file",
127 (self.config_file().display()).to_string(),
128 ),
129 ];
130 labels
131 }
132
133 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
134 let mut replaced_content = content
135 .replace("${devcontainerId}", &self.devcontainer_id())
136 .replace(
137 "${containerWorkspaceFolderBasename}",
138 &self.remote_workspace_base_name().unwrap_or_default(),
139 )
140 .replace(
141 "${localWorkspaceFolderBasename}",
142 &self.local_workspace_base_name()?,
143 )
144 .replace(
145 "${containerWorkspaceFolder}",
146 &self
147 .remote_workspace_folder()
148 .map(|path| path.display().to_string())
149 .unwrap_or_default()
150 .replace('\\', "/"),
151 )
152 .replace(
153 "${localWorkspaceFolder}",
154 &self.local_workspace_folder().replace('\\', "/"),
155 );
156 for (k, v) in &self.local_environment {
157 let find = format!("${{localEnv:{k}}}");
158 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
159 }
160
161 Ok(replaced_content)
162 }
163
164 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
165 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
166 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
167
168 self.config = ConfigStatus::VariableParsed(parsed_config);
169
170 Ok(())
171 }
172
    /// Computes the environment to inject into remote (in-container)
    /// processes: the container's own env (minus HOME), overlaid with the
    /// devcontainer `remoteEnv` entries after `${containerEnv:VAR}`
    /// substitution.
    ///
    /// NOTE(review): substitution is done textually on the serialized JSON;
    /// a container env value containing JSON metacharacters (quotes,
    /// backslashes) could corrupt the document — confirm values are benign.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            // Serialize remoteEnv to JSON so `${containerEnv:VAR}` markers can
            // be replaced wherever they appear inside the values.
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // remoteEnv entries win over inherited container env values.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
205
206 fn config_file(&self) -> PathBuf {
207 self.config_directory.join(&self.file_name)
208 }
209
210 fn dev_container(&self) -> &DevContainer {
211 match &self.config {
212 ConfigStatus::Deserialized(dev_container) => dev_container,
213 ConfigStatus::VariableParsed(dev_container) => dev_container,
214 }
215 }
216
    /// Locates the Dockerfile (if any) this config builds from:
    /// `build.dockerfile` for Dockerfile configs, or the primary compose
    /// service's `build.dockerfile` for compose configs. Image-based and
    /// empty configs have none.
    async fn dockerfile_location(&self) -> Option<PathBuf> {
        let dev_container = self.dev_container();
        match dev_container.build_type() {
            DevContainerBuildType::Image => None,
            DevContainerBuildType::Dockerfile => dev_container
                .build
                .as_ref()
                .map(|build| self.config_directory.join(&build.dockerfile)),
            DevContainerBuildType::DockerCompose => {
                // Failures resolving the compose manifest or primary service
                // are treated as "no Dockerfile" rather than hard errors.
                let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
                    return None;
                };
                let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
                else {
                    return None;
                };
                main_service
                    .build
                    .and_then(|b| b.dockerfile)
                    .map(|dockerfile| self.config_directory.join(dockerfile))
            }
            DevContainerBuildType::None => None,
        }
    }
241
242 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
243 let mut hasher = DefaultHasher::new();
244 let prefix = match &self.dev_container().name {
245 Some(name) => &safe_id_lower(name),
246 None => "zed-dc",
247 };
248 let prefix = prefix.get(..6).unwrap_or(prefix);
249
250 dockerfile_build_path.hash(&mut hasher);
251
252 let hash = hasher.finish();
253 format!("{}-{:x}-features", prefix, hash)
254 }
255
256 /// Gets the base image from the devcontainer with the following precedence:
257 /// - The devcontainer image if an image is specified
258 /// - The image sourced in the Dockerfile if a Dockerfile is specified
259 /// - The image sourced in the docker-compose main service, if one is specified
260 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
261 /// If no such image is available, return an error
262 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
263 if let Some(image) = &self.dev_container().image {
264 return Ok(image.to_string());
265 }
266 if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
267 let dockerfile_contents = self
268 .fs
269 .load(&self.config_directory.join(dockerfile))
270 .await
271 .map_err(|e| {
272 log::error!("Error reading dockerfile: {e}");
273 DevContainerError::DevContainerParseFailed
274 })?;
275 return image_from_dockerfile(self, dockerfile_contents);
276 }
277 if self.dev_container().docker_compose_file.is_some() {
278 let docker_compose_manifest = self.docker_compose_manifest().await?;
279 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
280
281 if let Some(dockerfile) = main_service
282 .build
283 .as_ref()
284 .and_then(|b| b.dockerfile.as_ref())
285 {
286 let dockerfile_contents = self
287 .fs
288 .load(&self.config_directory.join(dockerfile))
289 .await
290 .map_err(|e| {
291 log::error!("Error reading dockerfile: {e}");
292 DevContainerError::DevContainerParseFailed
293 })?;
294 return image_from_dockerfile(self, dockerfile_contents);
295 }
296 if let Some(image) = &main_service.image {
297 return Ok(image.to_string());
298 }
299
300 log::error!("No valid base image found in docker-compose configuration");
301 return Err(DevContainerError::DevContainerParseFailed);
302 }
303 log::error!("No valid base image found in dev container configuration");
304 Err(DevContainerError::DevContainerParseFailed)
305 }
306
    /// Downloads every configured feature from its OCI registry, materializes
    /// feature install scripts and env files into a temp directory, and writes
    /// the extended Dockerfile used to layer features onto the base image.
    /// On success, populates `self.root_image`, `self.features`, and
    /// `self.features_build_info`.
    ///
    /// Requires the config to already be variable-expanded.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image; its config determines the
        // container/remote users written into the builtin env file below.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Per-build scratch space under the system temp dir; the timestamp
        // keeps successive builds from colliding.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        // An intentionally empty build context: the extended Dockerfile pulls
        // everything it needs from images/contexts, not from the context dir.
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Env file sourced by feature install scripts inside the container.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor overrideFeatureInstallOrder when present.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` is the way to disable a feature entry.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Directory name is "<id>_<index>" so the same feature id can
            // appear more than once without clobbering itself.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Only OCI-registry feature references are supported here (no
            // local paths or direct tarball URLs).
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Feature tarballs are published as a single layer; take the first.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata gets the same non-remote variable substitution
            // as the main config.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the per-feature env (user options merged with defaults)
            // and a wrapper script that sources it before installing.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        // Non-compose builds always use BuildKit; compose builds only if the
        // installed compose supports it.
        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // Best-effort read of the user's own Dockerfile, if the config has one.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
542
    /// Renders the "extended" Dockerfile that layers the downloaded features
    /// on top of the user's base image / Dockerfile.
    ///
    /// `dockerfile_content` is the user's original Dockerfile (if any); it is
    /// prepended so its final stage can serve as the base image. When
    /// BuildKit is unavailable, feature content is supplied from a pre-built
    /// temp image stage instead of a named build context.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is never attempted on Windows hosts.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile layer per feature, in resolved install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that look up each user's passwd entry (HOME is
        // extracted from field 6 in the RUN layer below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the user's Dockerfile has a named final stage we can
        // reference; inject an alias when it has none.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit there are no named build contexts, so feature
        // content comes from a previously built temp image stage.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): the ENV lines below are appended with no leading
            // newline, relying on the preceding content being
            // newline-terminated — confirm generate_dockerfile_env's output
            // ends in one. HashMap iteration also makes ENV line order
            // non-deterministic across runs, which can affect layer caching.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
644
645 fn build_merged_resources(
646 &self,
647 base_image: DockerInspect,
648 ) -> Result<DockerBuildResources, DevContainerError> {
649 let dev_container = match &self.config {
650 ConfigStatus::Deserialized(_) => {
651 log::error!(
652 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
653 );
654 return Err(DevContainerError::DevContainerParseFailed);
655 }
656 ConfigStatus::VariableParsed(dev_container) => dev_container,
657 };
658 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
659
660 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
661
662 mounts.append(&mut feature_mounts);
663
664 let privileged = dev_container.privileged.unwrap_or(false)
665 || self.features.iter().any(|f| f.privileged());
666
667 let mut entrypoint_script_lines = vec![
668 "echo Container started".to_string(),
669 "trap \"exit 0\" 15".to_string(),
670 ];
671
672 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
673 entrypoint_script_lines.push(entrypoint.clone());
674 }
675 entrypoint_script_lines.append(&mut vec![
676 "exec \"$@\"".to_string(),
677 "while sleep 1 & wait $!; do :; done".to_string(),
678 ]);
679
680 Ok(DockerBuildResources {
681 image: base_image,
682 additional_mounts: mounts,
683 privileged,
684 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
685 })
686 }
687
688 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
689 if let ConfigStatus::Deserialized(_) = &self.config {
690 log::error!(
691 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
692 );
693 return Err(DevContainerError::DevContainerParseFailed);
694 }
695 let dev_container = self.dev_container();
696 match dev_container.build_type() {
697 DevContainerBuildType::Image => {
698 let built_docker_image = self.build_docker_image().await?;
699 let Some(base_image) = dev_container.image.as_ref() else {
700 log::error!("Dev container is using and image which can't be referenced");
701 return Err(DevContainerError::DevContainerParseFailed);
702 };
703 let built_docker_image = self
704 .update_remote_user_uid(built_docker_image, base_image)
705 .await?;
706
707 let resources = self.build_merged_resources(built_docker_image)?;
708 Ok(DevContainerBuildResources::Docker(resources))
709 }
710 DevContainerBuildType::Dockerfile => {
711 let built_docker_image = self.build_docker_image().await?;
712 let Some(features_build_info) = &self.features_build_info else {
713 log::error!(
714 "Can't attempt to build update UID dockerfile before initial docker build"
715 );
716 return Err(DevContainerError::DevContainerParseFailed);
717 };
718 let built_docker_image = self
719 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
720 .await?;
721
722 let resources = self.build_merged_resources(built_docker_image)?;
723 Ok(DevContainerBuildResources::Docker(resources))
724 }
725 DevContainerBuildType::DockerCompose => {
726 log::debug!("Using docker compose. Building extended compose files");
727 let docker_compose_resources = self.build_and_extend_compose_files().await?;
728
729 return Ok(DevContainerBuildResources::DockerCompose(
730 docker_compose_resources,
731 ));
732 }
733 DevContainerBuildType::None => {
734 return Err(DevContainerError::DevContainerParseFailed);
735 }
736 }
737 }
738
739 async fn run_dev_container(
740 &self,
741 build_resources: DevContainerBuildResources,
742 ) -> Result<DevContainerUp, DevContainerError> {
743 let ConfigStatus::VariableParsed(_) = &self.config else {
744 log::error!(
745 "Variables have not been parsed; cannot proceed with running the dev container"
746 );
747 return Err(DevContainerError::DevContainerParseFailed);
748 };
749 let running_container = match build_resources {
750 DevContainerBuildResources::DockerCompose(resources) => {
751 self.run_docker_compose(resources).await?
752 }
753 DevContainerBuildResources::Docker(resources) => {
754 self.run_docker_image(resources).await?
755 }
756 };
757
758 let remote_user = get_remote_user_from_config(&running_container, self)?;
759 let remote_workspace_folder = get_remote_dir_from_config(
760 &running_container,
761 (&self.local_project_directory.display()).to_string(),
762 )?;
763
764 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
765
766 Ok(DevContainerUp {
767 container_id: running_container.id,
768 remote_user,
769 remote_workspace_folder,
770 extension_ids: self.extension_ids(),
771 remote_env,
772 })
773 }
774
775 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
776 let dev_container = match &self.config {
777 ConfigStatus::Deserialized(_) => {
778 log::error!(
779 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
780 );
781 return Err(DevContainerError::DevContainerParseFailed);
782 }
783 ConfigStatus::VariableParsed(dev_container) => dev_container,
784 };
785 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
786 return Err(DevContainerError::DevContainerParseFailed);
787 };
788 let docker_compose_full_paths = docker_compose_files
789 .iter()
790 .map(|relative| self.config_directory.join(relative))
791 .collect::<Vec<PathBuf>>();
792
793 let Some(config) = self
794 .docker_client
795 .get_docker_compose_config(&docker_compose_full_paths)
796 .await?
797 else {
798 log::error!("Output could not deserialize into DockerComposeConfig");
799 return Err(DevContainerError::DevContainerParseFailed);
800 };
801 Ok(DockerComposeResources {
802 files: docker_compose_full_paths,
803 config,
804 })
805 }
806
    /// Builds (or reuses) the primary compose service's image with feature
    /// layers applied, then appends generated build- and runtime-override
    /// compose files to the resource list.
    ///
    /// NOTE(review): the first branch fires whenever the service has a
    /// `build` section — `.map(|b| b.dockerfile.as_ref()).is_some()` is true
    /// even when the inner `dockerfile` is None; confirm that is intended.
    async fn build_and_extend_compose_files(
        &self,
    ) -> Result<DockerComposeResources, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot build and extend compose files: features build info is not yet constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut docker_compose_resources = self.docker_compose_manifest().await?;
        let supports_buildkit = self.docker_client.supports_compose_buildkit();

        let (main_service_name, main_service) =
            find_primary_service(&docker_compose_resources, self)?;
        // Case 1: service builds from a Dockerfile — rebuild with the
        // extended (features) Dockerfile via a compose build override.
        let (built_service_image, built_service_image_tag) = if main_service
            .build
            .as_ref()
            .map(|b| b.dockerfile.as_ref())
            .is_some()
        {
            // Without BuildKit, feature content must be baked into a temp
            // image rather than passed as a named build context.
            if !supports_buildkit {
                self.build_feature_content_image().await?;
            }

            let dockerfile_path = &features_build_info.dockerfile_path;

            let build_args = if !supports_buildkit {
                HashMap::from([
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            } else {
                HashMap::from([
                    ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                    (
                        "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
                        "dev_container_auto_added_stage_label".to_string(),
                    ),
                    ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                ])
            };

            let additional_contexts = if !supports_buildkit {
                None
            } else {
                Some(HashMap::from([(
                    "dev_containers_feature_content_source".to_string(),
                    features_build_info
                        .features_content_dir
                        .display()
                        .to_string(),
                )]))
            };

            // Override only the primary service: swap in the extended
            // Dockerfile and tag the result with the features image tag.
            let build_override = DockerComposeConfig {
                name: None,
                services: HashMap::from([(
                    main_service_name.clone(),
                    DockerComposeService {
                        image: Some(features_build_info.image_tag.clone()),
                        entrypoint: None,
                        cap_add: None,
                        security_opt: None,
                        labels: None,
                        build: Some(DockerComposeServiceBuild {
                            context: Some(
                                features_build_info.empty_context_dir.display().to_string(),
                            ),
                            dockerfile: Some(dockerfile_path.display().to_string()),
                            args: Some(build_args),
                            additional_contexts,
                        }),
                        volumes: Vec::new(),
                        ..Default::default()
                    },
                )]),
                volumes: HashMap::new(),
            };

            let temp_base = std::env::temp_dir().join("devcontainer-zed");
            let config_location = temp_base.join("docker_compose_build.json");

            let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                log::error!("Error serializing docker compose runtime override: {e}");
                DevContainerError::DevContainerParseFailed
            })?;

            self.fs
                .write(&config_location, config_json.as_bytes())
                .await
                .map_err(|e| {
                    log::error!("Error writing the runtime override file: {e}");
                    DevContainerError::FilesystemError
                })?;

            docker_compose_resources.files.push(config_location);

            self.docker_client
                .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                .await?;
            (
                self.docker_client
                    .inspect(&features_build_info.image_tag)
                    .await?,
                &features_build_info.image_tag,
            )
        } else if let Some(image) = &main_service.image {
            // Case 2: image-based service with no features — use it as-is.
            if dev_container
                .features
                .as_ref()
                .is_none_or(|features| features.is_empty())
            {
                (self.docker_client.inspect(image).await?, image)
            } else {
                // Case 3: image-based service with features — same override
                // dance as Case 1, but basing the build on the service image.
                if !supports_buildkit {
                    self.build_feature_content_image().await?;
                }

                let dockerfile_path = &features_build_info.dockerfile_path;

                let build_args = if !supports_buildkit {
                    HashMap::from([
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                } else {
                    HashMap::from([
                        ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
                        ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
                        ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
                    ])
                };

                let additional_contexts = if !supports_buildkit {
                    None
                } else {
                    Some(HashMap::from([(
                        "dev_containers_feature_content_source".to_string(),
                        features_build_info
                            .features_content_dir
                            .display()
                            .to_string(),
                    )]))
                };

                let build_override = DockerComposeConfig {
                    name: None,
                    services: HashMap::from([(
                        main_service_name.clone(),
                        DockerComposeService {
                            image: Some(features_build_info.image_tag.clone()),
                            entrypoint: None,
                            cap_add: None,
                            security_opt: None,
                            labels: None,
                            build: Some(DockerComposeServiceBuild {
                                context: Some(
                                    features_build_info.empty_context_dir.display().to_string(),
                                ),
                                dockerfile: Some(dockerfile_path.display().to_string()),
                                args: Some(build_args),
                                additional_contexts,
                            }),
                            volumes: Vec::new(),
                            ..Default::default()
                        },
                    )]),
                    volumes: HashMap::new(),
                };

                let temp_base = std::env::temp_dir().join("devcontainer-zed");
                let config_location = temp_base.join("docker_compose_build.json");

                let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
                    log::error!("Error serializing docker compose runtime override: {e}");
                    DevContainerError::DevContainerParseFailed
                })?;

                self.fs
                    .write(&config_location, config_json.as_bytes())
                    .await
                    .map_err(|e| {
                        log::error!("Error writing the runtime override file: {e}");
                        DevContainerError::FilesystemError
                    })?;

                docker_compose_resources.files.push(config_location);

                self.docker_client
                    .docker_compose_build(&docker_compose_resources.files, &self.project_name())
                    .await?;

                (
                    self.docker_client
                        .inspect(&features_build_info.image_tag)
                        .await?,
                    &features_build_info.image_tag,
                )
            }
        } else {
            log::error!("Docker compose must have either image or dockerfile defined");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Remap the remote user's UID onto the built image, then merge
        // feature resources (mounts, privilege, entrypoints).
        let built_service_image = self
            .update_remote_user_uid(built_service_image, built_service_image_tag)
            .await?;

        let resources = self.build_merged_resources(built_service_image)?;

        // Preserve `network_mode: service:<name>` relationships in the
        // runtime override.
        let network_mode = main_service.network_mode.as_ref();
        let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
        let runtime_override_file = self
            .write_runtime_override_file(&main_service_name, network_mode_service, resources)
            .await?;

        docker_compose_resources.files.push(runtime_override_file);

        Ok(docker_compose_resources)
    }
1040
1041 async fn write_runtime_override_file(
1042 &self,
1043 main_service_name: &str,
1044 network_mode_service: Option<&str>,
1045 resources: DockerBuildResources,
1046 ) -> Result<PathBuf, DevContainerError> {
1047 let config =
1048 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1049 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1050 let config_location = temp_base.join("docker_compose_runtime.json");
1051
1052 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1053 log::error!("Error serializing docker compose runtime override: {e}");
1054 DevContainerError::DevContainerParseFailed
1055 })?;
1056
1057 self.fs
1058 .write(&config_location, config_json.as_bytes())
1059 .await
1060 .map_err(|e| {
1061 log::error!("Error writing the runtime override file: {e}");
1062 DevContainerError::FilesystemError
1063 })?;
1064
1065 Ok(config_location)
1066 }
1067
1068 fn build_runtime_override(
1069 &self,
1070 main_service_name: &str,
1071 network_mode_service: Option<&str>,
1072 resources: DockerBuildResources,
1073 ) -> Result<DockerComposeConfig, DevContainerError> {
1074 let mut runtime_labels = HashMap::new();
1075
1076 if let Some(metadata) = &resources.image.config.labels.metadata {
1077 let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
1078 log::error!("Error serializing docker image metadata: {e}");
1079 DevContainerError::ContainerNotValid(resources.image.id.clone())
1080 })?;
1081
1082 runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
1083 }
1084
1085 for (k, v) in self.identifying_labels() {
1086 runtime_labels.insert(k.to_string(), v.to_string());
1087 }
1088
1089 let config_volumes: HashMap<String, DockerComposeVolume> = resources
1090 .additional_mounts
1091 .iter()
1092 .filter_map(|mount| {
1093 if let Some(mount_type) = &mount.mount_type
1094 && mount_type.to_lowercase() == "volume"
1095 && let Some(source) = &mount.source
1096 {
1097 Some((
1098 source.clone(),
1099 DockerComposeVolume {
1100 name: source.clone(),
1101 },
1102 ))
1103 } else {
1104 None
1105 }
1106 })
1107 .collect();
1108
1109 let volumes: Vec<MountDefinition> = resources
1110 .additional_mounts
1111 .iter()
1112 .map(|v| MountDefinition {
1113 source: v.source.clone(),
1114 target: v.target.clone(),
1115 mount_type: v.mount_type.clone(),
1116 })
1117 .collect();
1118
1119 let mut main_service = DockerComposeService {
1120 entrypoint: Some(vec![
1121 "/bin/sh".to_string(),
1122 "-c".to_string(),
1123 resources.entrypoint_script,
1124 "-".to_string(),
1125 ]),
1126 cap_add: Some(vec!["SYS_PTRACE".to_string()]),
1127 security_opt: Some(vec!["seccomp=unconfined".to_string()]),
1128 labels: Some(runtime_labels),
1129 volumes,
1130 privileged: Some(resources.privileged),
1131 ..Default::default()
1132 };
1133 // let mut extra_service_port_declarations: Vec<(String, DockerComposeService)> = Vec::new();
1134 let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
1135 if let Some(forward_ports) = &self.dev_container().forward_ports {
1136 let main_service_ports: Vec<String> = forward_ports
1137 .iter()
1138 .filter_map(|f| match f {
1139 ForwardPort::Number(port) => Some(port.to_string()),
1140 ForwardPort::String(port) => {
1141 let parts: Vec<&str> = port.split(":").collect();
1142 if parts.len() <= 1 {
1143 Some(port.to_string())
1144 } else if parts.len() == 2 {
1145 if parts[0] == main_service_name {
1146 Some(parts[1].to_string())
1147 } else {
1148 None
1149 }
1150 } else {
1151 None
1152 }
1153 }
1154 })
1155 .collect();
1156 for port in main_service_ports {
1157 // If the main service uses a different service's network bridge, append to that service's ports instead
1158 if let Some(network_service_name) = network_mode_service {
1159 if let Some(service) = service_declarations.get_mut(network_service_name) {
1160 service.ports.push(DockerComposeServicePort {
1161 target: port.clone(),
1162 published: port.clone(),
1163 ..Default::default()
1164 });
1165 } else {
1166 service_declarations.insert(
1167 network_service_name.to_string(),
1168 DockerComposeService {
1169 ports: vec![DockerComposeServicePort {
1170 target: port.clone(),
1171 published: port.clone(),
1172 ..Default::default()
1173 }],
1174 ..Default::default()
1175 },
1176 );
1177 }
1178 } else {
1179 main_service.ports.push(DockerComposeServicePort {
1180 target: port.clone(),
1181 published: port.clone(),
1182 ..Default::default()
1183 });
1184 }
1185 }
1186 let other_service_ports: Vec<(&str, &str)> = forward_ports
1187 .iter()
1188 .filter_map(|f| match f {
1189 ForwardPort::Number(_) => None,
1190 ForwardPort::String(port) => {
1191 let parts: Vec<&str> = port.split(":").collect();
1192 if parts.len() != 2 {
1193 None
1194 } else {
1195 if parts[0] == main_service_name {
1196 None
1197 } else {
1198 Some((parts[0], parts[1]))
1199 }
1200 }
1201 }
1202 })
1203 .collect();
1204 for (service_name, port) in other_service_ports {
1205 if let Some(service) = service_declarations.get_mut(service_name) {
1206 service.ports.push(DockerComposeServicePort {
1207 target: port.to_string(),
1208 published: port.to_string(),
1209 ..Default::default()
1210 });
1211 } else {
1212 service_declarations.insert(
1213 service_name.to_string(),
1214 DockerComposeService {
1215 ports: vec![DockerComposeServicePort {
1216 target: port.to_string(),
1217 published: port.to_string(),
1218 ..Default::default()
1219 }],
1220 ..Default::default()
1221 },
1222 );
1223 }
1224 }
1225 }
1226 if let Some(port) = &self.dev_container().app_port {
1227 if let Some(network_service_name) = network_mode_service {
1228 if let Some(service) = service_declarations.get_mut(network_service_name) {
1229 service.ports.push(DockerComposeServicePort {
1230 target: port.clone(),
1231 published: port.clone(),
1232 ..Default::default()
1233 });
1234 } else {
1235 service_declarations.insert(
1236 network_service_name.to_string(),
1237 DockerComposeService {
1238 ports: vec![DockerComposeServicePort {
1239 target: port.clone(),
1240 published: port.clone(),
1241 ..Default::default()
1242 }],
1243 ..Default::default()
1244 },
1245 );
1246 }
1247 } else {
1248 main_service.ports.push(DockerComposeServicePort {
1249 target: port.clone(),
1250 published: port.clone(),
1251 ..Default::default()
1252 });
1253 }
1254 }
1255
1256 service_declarations.insert(main_service_name.to_string(), main_service);
1257 let new_docker_compose_config = DockerComposeConfig {
1258 name: None,
1259 services: service_declarations,
1260 volumes: config_volumes,
1261 };
1262
1263 Ok(new_docker_compose_config)
1264 }
1265
    /// Builds (or resolves) the docker image for an image- or
    /// Dockerfile-based dev container, returning its inspect data.
    ///
    /// For image-based configs with no features, the base image is returned
    /// as-is with no build. Otherwise a `docker buildx build` (see
    /// `create_docker_build`) extends the base with feature layers and the
    /// resulting tag is inspected. Compose-based configs must not reach this
    /// method.
    async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
        // Variable expansion must have happened already; the raw deserialized
        // config may still contain `${...}` placeholders.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        match dev_container.build_type() {
            DevContainerBuildType::Image => {
                let Some(image_tag) = &dev_container.image else {
                    return Err(DevContainerError::DevContainerParseFailed);
                };
                let base_image = self.docker_client.inspect(image_tag).await?;
                // Short-circuit: with no features there is nothing to layer on
                // top, so the pulled base image is the final image.
                if dev_container
                    .features
                    .as_ref()
                    .is_none_or(|features| features.is_empty())
                {
                    log::debug!("No features to add. Using base image");
                    return Ok(base_image);
                }
            }
            DevContainerBuildType::Dockerfile => {}
            // Compose containers are built elsewhere; `None` has nothing to build.
            DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
                return Err(DevContainerError::DevContainerParseFailed);
            }
        };

        let mut command = self.create_docker_build()?;

        let output = self
            .command_runner
            .run_command(&mut command)
            .await
            .map_err(|e| {
                log::error!("Error building docker image: {e}");
                DevContainerError::CommandFailed(command.get_program().display().to_string())
            })?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            log::error!("docker buildx build failed: {stderr}");
            return Err(DevContainerError::CommandFailed(
                command.get_program().display().to_string(),
            ));
        }

        // After a successful build, inspect the newly tagged image to get its metadata
        let Some(features_build_info) = &self.features_build_info else {
            log::error!("Features build info expected, but not created");
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let image = self
            .docker_client
            .inspect(&features_build_info.image_tag)
            .await?;

        Ok(image)
    }
1329
    /// Windows no-op counterpart of `update_remote_user_uid`: the Unix variant
    /// shells out to the `id` command to read host uid/gid, which is not
    /// available here, so the image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1338 #[cfg(not(target_os = "windows"))]
1339 async fn update_remote_user_uid(
1340 &self,
1341 image: DockerInspect,
1342 base_image: &str,
1343 ) -> Result<DockerInspect, DevContainerError> {
1344 let dev_container = self.dev_container();
1345
1346 let Some(features_build_info) = &self.features_build_info else {
1347 return Ok(image);
1348 };
1349
1350 // updateRemoteUserUID defaults to true per the devcontainers spec
1351 if dev_container.update_remote_user_uid == Some(false) {
1352 return Ok(image);
1353 }
1354
1355 let remote_user = get_remote_user_from_config(&image, self)?;
1356 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1357 return Ok(image);
1358 }
1359
1360 let image_user = image
1361 .config
1362 .image_user
1363 .as_deref()
1364 .unwrap_or("root")
1365 .to_string();
1366
1367 let host_uid = Command::new("id")
1368 .arg("-u")
1369 .output()
1370 .await
1371 .map_err(|e| {
1372 log::error!("Failed to get host UID: {e}");
1373 DevContainerError::CommandFailed("id -u".to_string())
1374 })
1375 .and_then(|output| {
1376 String::from_utf8_lossy(&output.stdout)
1377 .trim()
1378 .parse::<u32>()
1379 .map_err(|e| {
1380 log::error!("Failed to parse host UID: {e}");
1381 DevContainerError::CommandFailed("id -u".to_string())
1382 })
1383 })?;
1384
1385 let host_gid = Command::new("id")
1386 .arg("-g")
1387 .output()
1388 .await
1389 .map_err(|e| {
1390 log::error!("Failed to get host GID: {e}");
1391 DevContainerError::CommandFailed("id -g".to_string())
1392 })
1393 .and_then(|output| {
1394 String::from_utf8_lossy(&output.stdout)
1395 .trim()
1396 .parse::<u32>()
1397 .map_err(|e| {
1398 log::error!("Failed to parse host GID: {e}");
1399 DevContainerError::CommandFailed("id -g".to_string())
1400 })
1401 })?;
1402
1403 let dockerfile_content = self.generate_update_uid_dockerfile();
1404
1405 let dockerfile_path = features_build_info
1406 .features_content_dir
1407 .join("updateUID.Dockerfile");
1408 self.fs
1409 .write(&dockerfile_path, dockerfile_content.as_bytes())
1410 .await
1411 .map_err(|e| {
1412 log::error!("Failed to write updateUID Dockerfile: {e}");
1413 DevContainerError::FilesystemError
1414 })?;
1415
1416 let updated_image_tag = format!("{}-uid", features_build_info.image_tag);
1417
1418 let mut command = Command::new(self.docker_client.docker_cli());
1419 command.args(["build"]);
1420 command.args(["-f", &dockerfile_path.display().to_string()]);
1421 command.args(["-t", &updated_image_tag]);
1422 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1423 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1424 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1425 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1426 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1427 command.arg(features_build_info.empty_context_dir.display().to_string());
1428
1429 let output = self
1430 .command_runner
1431 .run_command(&mut command)
1432 .await
1433 .map_err(|e| {
1434 log::error!("Error building UID update image: {e}");
1435 DevContainerError::CommandFailed(command.get_program().display().to_string())
1436 })?;
1437
1438 if !output.status.success() {
1439 let stderr = String::from_utf8_lossy(&output.stderr);
1440 log::error!("UID update build failed: {stderr}");
1441 return Err(DevContainerError::CommandFailed(
1442 command.get_program().display().to_string(),
1443 ));
1444 }
1445
1446 self.docker_client.inspect(&updated_image_tag).await
1447 }
1448
1449 #[cfg(not(target_os = "windows"))]
1450 fn generate_update_uid_dockerfile(&self) -> String {
1451 let mut dockerfile = r#"ARG BASE_IMAGE
1452FROM $BASE_IMAGE
1453
1454USER root
1455
1456ARG REMOTE_USER
1457ARG NEW_UID
1458ARG NEW_GID
1459SHELL ["/bin/sh", "-c"]
1460RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1461 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1462 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1463 if [ -z "$OLD_UID" ]; then \
1464 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1465 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1466 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1467 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1468 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1469 else \
1470 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1471 FREE_GID=65532; \
1472 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1473 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1474 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1475 fi; \
1476 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1477 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1478 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1479 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1480 fi; \
1481 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1482 fi;
1483
1484ARG IMAGE_USER
1485USER $IMAGE_USER
1486
1487# Ensure that /etc/profile does not clobber the existing path
1488RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1489"#.to_string();
1490 for feature in &self.features {
1491 let container_env_layer = feature.generate_dockerfile_env();
1492 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1493 }
1494
1495 if let Some(env) = &self.dev_container().container_env {
1496 for (key, value) in env {
1497 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1498 }
1499 }
1500 dockerfile
1501 }
1502
1503 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1504 let Some(features_build_info) = &self.features_build_info else {
1505 log::error!("Features build info not available for building feature content image");
1506 return Err(DevContainerError::DevContainerParseFailed);
1507 };
1508 let features_content_dir = &features_build_info.features_content_dir;
1509
1510 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1511 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1512
1513 self.fs
1514 .write(&dockerfile_path, dockerfile_content.as_bytes())
1515 .await
1516 .map_err(|e| {
1517 log::error!("Failed to write feature content Dockerfile: {e}");
1518 DevContainerError::FilesystemError
1519 })?;
1520
1521 let mut command = Command::new(self.docker_client.docker_cli());
1522 command.args([
1523 "build",
1524 "-t",
1525 "dev_container_feature_content_temp",
1526 "-f",
1527 &dockerfile_path.display().to_string(),
1528 &features_content_dir.display().to_string(),
1529 ]);
1530
1531 let output = self
1532 .command_runner
1533 .run_command(&mut command)
1534 .await
1535 .map_err(|e| {
1536 log::error!("Error building feature content image: {e}");
1537 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1538 })?;
1539
1540 if !output.status.success() {
1541 let stderr = String::from_utf8_lossy(&output.stderr);
1542 log::error!("Feature content image build failed: {stderr}");
1543 return Err(DevContainerError::CommandFailed(
1544 self.docker_client.docker_cli(),
1545 ));
1546 }
1547
1548 Ok(())
1549 }
1550
    /// Assembles the `docker buildx build` command that extends the base image
    /// with dev container features, mirroring the CLI reference
    /// implementation's `getFeaturesBuildOptions`.
    ///
    /// Requires a variable-parsed config and a populated
    /// `features_build_info`; argument order matters to the docker CLI, so
    /// lines below should not be reordered casually.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            // No explicit base image: the generated Dockerfile labels its base
            // stage with this sentinel instead.
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // The user the base image runs as; defaults to root when the inspected
        // image does not declare one.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // Forward any user-specified build args from the devcontainer config.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
            command.arg(self.config_directory.display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1639
1640 async fn run_docker_compose(
1641 &self,
1642 resources: DockerComposeResources,
1643 ) -> Result<DockerInspect, DevContainerError> {
1644 let mut command = Command::new(self.docker_client.docker_cli());
1645 command.args(&["compose", "--project-name", &self.project_name()]);
1646 for docker_compose_file in resources.files {
1647 command.args(&["-f", &docker_compose_file.display().to_string()]);
1648 }
1649 command.args(&["up", "-d"]);
1650
1651 let output = self
1652 .command_runner
1653 .run_command(&mut command)
1654 .await
1655 .map_err(|e| {
1656 log::error!("Error running docker compose up: {e}");
1657 DevContainerError::CommandFailed(command.get_program().display().to_string())
1658 })?;
1659
1660 if !output.status.success() {
1661 let stderr = String::from_utf8_lossy(&output.stderr);
1662 log::error!("Non-success status from docker compose up: {}", stderr);
1663 return Err(DevContainerError::CommandFailed(
1664 command.get_program().display().to_string(),
1665 ));
1666 }
1667
1668 if let Some(docker_ps) = self.check_for_existing_container().await? {
1669 log::debug!("Found newly created dev container");
1670 return self.docker_client.inspect(&docker_ps.id).await;
1671 }
1672
1673 log::error!("Could not find existing container after docker compose up");
1674
1675 Err(DevContainerError::DevContainerParseFailed)
1676 }
1677
1678 async fn run_docker_image(
1679 &self,
1680 build_resources: DockerBuildResources,
1681 ) -> Result<DockerInspect, DevContainerError> {
1682 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1683
1684 let output = self
1685 .command_runner
1686 .run_command(&mut docker_run_command)
1687 .await
1688 .map_err(|e| {
1689 log::error!("Error running docker run: {e}");
1690 DevContainerError::CommandFailed(
1691 docker_run_command.get_program().display().to_string(),
1692 )
1693 })?;
1694
1695 if !output.status.success() {
1696 let std_err = String::from_utf8_lossy(&output.stderr);
1697 log::error!("Non-success status from docker run. StdErr: {std_err}");
1698 return Err(DevContainerError::CommandFailed(
1699 docker_run_command.get_program().display().to_string(),
1700 ));
1701 }
1702
1703 log::debug!("Checking for container that was started");
1704 let Some(docker_ps) = self.check_for_existing_container().await? else {
1705 log::error!("Could not locate container just created");
1706 return Err(DevContainerError::DevContainerParseFailed);
1707 };
1708 self.docker_client.inspect(&docker_ps.id).await
1709 }
1710
1711 fn local_workspace_folder(&self) -> String {
1712 self.local_project_directory.display().to_string()
1713 }
1714 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1715 self.local_project_directory
1716 .file_name()
1717 .map(|f| f.display().to_string())
1718 .ok_or(DevContainerError::DevContainerParseFailed)
1719 }
1720
1721 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1722 self.dev_container()
1723 .workspace_folder
1724 .as_ref()
1725 .map(|folder| PathBuf::from(folder))
1726 .or(Some(
1727 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1728 ))
1729 .ok_or(DevContainerError::DevContainerParseFailed)
1730 }
1731 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1732 self.remote_workspace_folder().and_then(|f| {
1733 f.file_name()
1734 .map(|file_name| file_name.display().to_string())
1735 .ok_or(DevContainerError::DevContainerParseFailed)
1736 })
1737 }
1738
1739 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1740 if let Some(mount) = &self.dev_container().workspace_mount {
1741 return Ok(mount.clone());
1742 }
1743 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1744 return Err(DevContainerError::DevContainerParseFailed);
1745 };
1746
1747 Ok(MountDefinition {
1748 source: Some(self.local_workspace_folder()),
1749 target: format!("/workspaces/{}", project_directory_name.display()),
1750 mount_type: None,
1751 })
1752 }
1753
    /// Assembles the `docker run` command that starts the dev container:
    /// detached, with the workspace and extra mounts, identifying labels,
    /// serialized image metadata, forwarded ports, and the entrypoint wrapped
    /// in `/bin/sh -c <script>`. Argument order matters to the docker CLI.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Podman-specific flags: disable SELinux labeling on mounts and keep
        // the host user's uid mapping inside the rootless container.
        if &docker_cli == "podman" {
            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
        }

        command.arg("--sig-proxy=false");
        // Detached: we poll for the container afterwards rather than attaching.
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used later to find this container again (see
        // check_for_existing_container).
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Persist the devcontainer metadata from the image as a container label.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only plain numeric forwards are published here; `service:port` forms
        // are compose-specific and handled in build_runtime_override.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        if let Some(app_port) = &self.dev_container().app_port {
            command.arg("-p");
            command.arg(format!("{app_port}:{app_port}"));
        }

        // Override the entrypoint so the bootstrap script runs via /bin/sh -c;
        // the trailing "-" is passed through as $0 for the script.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1823
1824 fn extension_ids(&self) -> Vec<String> {
1825 self.dev_container()
1826 .customizations
1827 .as_ref()
1828 .map(|c| c.zed.extensions.clone())
1829 .unwrap_or_default()
1830 }
1831
    /// Full "up" flow for a new container: host-side initialize commands,
    /// feature/Dockerfile downloads, image or compose build, container start,
    /// and the in-container lifecycle scripts. Step order is significant.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        // initializeCommand runs on the host before anything is downloaded or built.
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: freshly created container, so creation-time scripts
        // (onCreate/updateContent/postCreate/postStart) run as well.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1845
    /// Runs the configured lifecycle scripts inside the container via docker
    /// exec, in spec order.
    ///
    /// When `new_container` is true the creation-time scripts
    /// (`onCreateCommand`, `updateContentCommand`, `postCreateCommand`,
    /// `postStartCommand`) run first; `postAttachCommand` runs in every case.
    /// The first failing command aborts the whole sequence.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        // All scripts execute with the remote workspace folder as cwd.
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // onCreate and updateContent run as root; the later scripts run as
            // the resolved remote user. NOTE(review): the devcontainers spec
            // runs these as the container/remote user in some implementations —
            // confirm running as root here is intentional.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs on every attach, including reconnects to an
        // existing container.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1933
1934 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1935 let ConfigStatus::VariableParsed(config) = &self.config else {
1936 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1937 return Err(DevContainerError::DevContainerParseFailed);
1938 };
1939
1940 if let Some(initialize_command) = &config.initialize_command {
1941 log::debug!("Running initialize command");
1942 initialize_command
1943 .run(&self.command_runner, &self.local_project_directory)
1944 .await
1945 } else {
1946 log::warn!("No initialize command found");
1947 Ok(())
1948 }
1949 }
1950
1951 async fn check_for_existing_devcontainer(
1952 &self,
1953 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1954 if let Some(docker_ps) = self.check_for_existing_container().await? {
1955 log::debug!("Dev container already found. Proceeding with it");
1956
1957 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1958
1959 if !docker_inspect.is_running() {
1960 log::debug!("Container not running. Will attempt to start, and then proceed");
1961 self.docker_client.start_container(&docker_ps.id).await?;
1962 }
1963
1964 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1965
1966 let remote_folder = get_remote_dir_from_config(
1967 &docker_inspect,
1968 (&self.local_project_directory.display()).to_string(),
1969 )?;
1970
1971 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1972
1973 let dev_container_up = DevContainerUp {
1974 container_id: docker_ps.id,
1975 remote_user: remote_user,
1976 remote_workspace_folder: remote_folder,
1977 extension_ids: self.extension_ids(),
1978 remote_env,
1979 };
1980
1981 self.run_remote_scripts(&dev_container_up, false).await?;
1982
1983 Ok(Some(dev_container_up))
1984 } else {
1985 log::debug!("Existing container not found.");
1986
1987 Ok(None)
1988 }
1989 }
1990
1991 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1992 self.docker_client
1993 .find_process_by_filters(
1994 self.identifying_labels()
1995 .iter()
1996 .map(|(k, v)| format!("label={k}={v}"))
1997 .collect(),
1998 )
1999 .await
2000 }
2001
2002 fn project_name(&self) -> String {
2003 if let Some(name) = &self.dev_container().name {
2004 safe_id_lower(name)
2005 } else {
2006 let alternate_name = &self
2007 .local_workspace_base_name()
2008 .unwrap_or(self.local_workspace_folder());
2009 safe_id_lower(alternate_name)
2010 }
2011 }
2012}
2013
2014/// Holds all the information needed to construct a `docker buildx build` command
2015/// that extends a base image with dev container features.
2016///
2017/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
2018/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm").
    /// NOTE(review): presumably `None` when no base image was resolved ahead of
    /// the build — confirm at the construction sites.
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2032
2033pub(crate) async fn read_devcontainer_configuration(
2034 config: DevContainerConfig,
2035 context: &DevContainerContext,
2036 environment: HashMap<String, String>,
2037) -> Result<DevContainer, DevContainerError> {
2038 let docker = if context.use_podman {
2039 Docker::new("podman")
2040 } else {
2041 Docker::new("docker")
2042 };
2043 let mut dev_container = DevContainerManifest::new(
2044 context,
2045 environment,
2046 Arc::new(docker),
2047 Arc::new(DefaultCommandRunner::new()),
2048 config,
2049 &context.project_directory.as_ref(),
2050 )
2051 .await?;
2052 dev_container.parse_nonremote_vars()?;
2053 Ok(dev_container.dev_container().clone())
2054}
2055
2056pub(crate) async fn spawn_dev_container(
2057 context: &DevContainerContext,
2058 environment: HashMap<String, String>,
2059 config: DevContainerConfig,
2060 local_project_path: &Path,
2061) -> Result<DevContainerUp, DevContainerError> {
2062 let docker = if context.use_podman {
2063 Docker::new("podman")
2064 } else {
2065 Docker::new("docker")
2066 };
2067 let mut devcontainer_manifest = DevContainerManifest::new(
2068 context,
2069 environment,
2070 Arc::new(docker),
2071 Arc::new(DefaultCommandRunner::new()),
2072 config,
2073 local_project_path,
2074 )
2075 .await?;
2076
2077 devcontainer_manifest.parse_nonremote_vars()?;
2078
2079 log::debug!("Checking for existing container");
2080 if let Some(devcontainer) = devcontainer_manifest
2081 .check_for_existing_devcontainer()
2082 .await?
2083 {
2084 Ok(devcontainer)
2085 } else {
2086 log::debug!("Existing container not found. Building");
2087
2088 devcontainer_manifest.build_and_run().await
2089 }
2090}
2091
/// Inputs for constructing a `docker run` command for a single-container
/// (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect output of the image the container will be created from.
    image: DockerInspect,
    // Extra mounts beyond the workspace mount.
    additional_mounts: Vec<MountDefinition>,
    // NOTE(review): presumably maps to `--privileged` — confirm in the run-command builder.
    privileged: bool,
    // Shell script used as the container's entrypoint.
    entrypoint_script: String,
}
2099
/// The two supported build strategies: a Docker Compose project, or a plain
/// single-container `docker run`.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2105
2106fn find_primary_service(
2107 docker_compose: &DockerComposeResources,
2108 devcontainer: &DevContainerManifest,
2109) -> Result<(String, DockerComposeService), DevContainerError> {
2110 let Some(service_name) = &devcontainer.dev_container().service else {
2111 return Err(DevContainerError::DevContainerParseFailed);
2112 };
2113
2114 match docker_compose.config.services.get(service_name) {
2115 Some(service) => Ok((service_name.clone(), service.clone())),
2116 None => Err(DevContainerError::DevContainerParseFailed),
2117 }
2118}
2119
2120/// Destination folder inside the container where feature content is staged during build.
2121/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
2122const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2123
2124/// Escapes regex special characters in a string.
2125fn escape_regex_chars(input: &str) -> String {
2126 let mut result = String::with_capacity(input.len() * 2);
2127 for c in input.chars() {
2128 if ".*+?^${}()|[]\\".contains(c) {
2129 result.push('\\');
2130 }
2131 result.push(c);
2132 }
2133 result
2134}
2135
2136/// Extracts the short feature ID from a full feature reference string.
2137///
2138/// Examples:
2139/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
2140/// - `ghcr.io/user/repo/go` → `go`
2141/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
2142/// - `./myFeature` → `myFeature`
2143fn extract_feature_id(feature_ref: &str) -> &str {
2144 let without_version = if let Some(at_idx) = feature_ref.rfind('@') {
2145 &feature_ref[..at_idx]
2146 } else {
2147 let last_slash = feature_ref.rfind('/');
2148 let last_colon = feature_ref.rfind(':');
2149 match (last_slash, last_colon) {
2150 (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
2151 _ => feature_ref,
2152 }
2153 };
2154 match without_version.rfind('/') {
2155 Some(idx) => &without_version[idx + 1..],
2156 None => without_version,
2157 }
2158}
2159
2160/// Generates a shell command that looks up a user's passwd entry.
2161///
2162/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2163/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2164fn get_ent_passwd_shell_command(user: &str) -> String {
2165 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2166 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2167 format!(
2168 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2169 shell = escaped_for_shell,
2170 re = escaped_for_regex,
2171 )
2172}
2173
2174/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2175///
2176/// Features listed in the override come first (in the specified order), followed
2177/// by any remaining features sorted lexicographically by their full reference ID.
2178fn resolve_feature_order<'a>(
2179 features: &'a HashMap<String, FeatureOptions>,
2180 override_order: &Option<Vec<String>>,
2181) -> Vec<(&'a String, &'a FeatureOptions)> {
2182 if let Some(order) = override_order {
2183 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2184 for ordered_id in order {
2185 if let Some((key, options)) = features.get_key_value(ordered_id) {
2186 ordered.push((key, options));
2187 }
2188 }
2189 let mut remaining: Vec<_> = features
2190 .iter()
2191 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2192 .collect();
2193 remaining.sort_by_key(|(id, _)| id.as_str());
2194 ordered.extend(remaining);
2195 ordered
2196 } else {
2197 let mut entries: Vec<_> = features.iter().collect();
2198 entries.sort_by_key(|(id, _)| id.as_str());
2199 entries
2200 }
2201}
2202
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `feature_ref` is the full reference, `feature_id` the short display name,
/// and `env_variables` a newline-separated dump shown in the script's banner
/// (presumably the feature's option environment — confirm at the call site).
/// Returns `DevContainerParseFailed` when any value cannot be shell-quoted.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty line so the options render beneath the banner.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}")
        DevContainerError::DevContainerParseFailed
    })?;

    // The wrapper traps EXIT to report failures, prints an identification
    // banner, then exports (`set -a`) the built-in and per-feature env files
    // before handing off to the feature's own `install.sh`.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2261
// Dockerfile actions need to be moved to their own file
/// Returns the build-stage alias (`FROM <image> AS <alias>`) of the first
/// `FROM` line in a Dockerfile, if one is present.
///
/// Only lines starting with uppercase `FROM` are considered and the line is
/// split on single spaces, matching the sibling Dockerfile helpers.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))
        .and_then(|line| {
            let words: Vec<&str> = line.split(' ').collect();
            // `FROM image AS alias`: at least three words, with the
            // penultimate word being a case-insensitive "as".
            // Fixed: drop the needless `return`s and the allocating
            // `to_lowercase()` comparison.
            (words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as"))
                .then(|| words[words.len() - 1].to_string())
        })
}
2276
2277fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2278 if dockerfile_alias(dockerfile_content).is_some() {
2279 dockerfile_content.to_string()
2280 } else {
2281 dockerfile_content
2282 .lines()
2283 .map(|line| {
2284 if line.starts_with("FROM") {
2285 format!("{} AS {}", line, alias)
2286 } else {
2287 line.to_string()
2288 }
2289 })
2290 .collect::<Vec<String>>()
2291 .join("\n")
2292 }
2293}
2294
2295fn image_from_dockerfile(
2296 devcontainer: &DevContainerManifest,
2297 dockerfile_contents: String,
2298) -> Result<String, DevContainerError> {
2299 let mut raw_contents = dockerfile_contents
2300 .lines()
2301 .find(|line| line.starts_with("FROM"))
2302 .and_then(|from_line| {
2303 from_line
2304 .split(' ')
2305 .collect::<Vec<&str>>()
2306 .get(1)
2307 .map(|s| s.to_string())
2308 })
2309 .ok_or_else(|| {
2310 log::error!("Could not find an image definition in dockerfile");
2311 DevContainerError::DevContainerParseFailed
2312 })?;
2313
2314 for (k, v) in devcontainer
2315 .dev_container()
2316 .build
2317 .as_ref()
2318 .and_then(|b| b.args.as_ref())
2319 .unwrap_or(&HashMap::new())
2320 {
2321 raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2322 }
2323 Ok(raw_contents)
2324}
2325
2326// Container user things
2327// This should come from spec - see the docs
2328fn get_remote_user_from_config(
2329 docker_config: &DockerInspect,
2330 devcontainer: &DevContainerManifest,
2331) -> Result<String, DevContainerError> {
2332 if let DevContainer {
2333 remote_user: Some(user),
2334 ..
2335 } = &devcontainer.dev_container()
2336 {
2337 return Ok(user.clone());
2338 }
2339 if let Some(metadata) = &docker_config.config.labels.metadata {
2340 for metadatum in metadata {
2341 if let Some(remote_user) = metadatum.get("remoteUser") {
2342 if let Some(remote_user_str) = remote_user.as_str() {
2343 return Ok(remote_user_str.to_string());
2344 }
2345 }
2346 }
2347 }
2348 if let Some(image_user) = &docker_config.config.image_user {
2349 if !image_user.is_empty() {
2350 return Ok(image_user.to_string());
2351 }
2352 }
2353 Ok("root".to_string())
2354}
2355
2356// This should come from spec - see the docs
2357fn get_container_user_from_config(
2358 docker_config: &DockerInspect,
2359 devcontainer: &DevContainerManifest,
2360) -> Result<String, DevContainerError> {
2361 if let Some(user) = &devcontainer.dev_container().container_user {
2362 return Ok(user.to_string());
2363 }
2364 if let Some(metadata) = &docker_config.config.labels.metadata {
2365 for metadatum in metadata {
2366 if let Some(container_user) = metadatum.get("containerUser") {
2367 if let Some(container_user_str) = container_user.as_str() {
2368 return Ok(container_user_str.to_string());
2369 }
2370 }
2371 }
2372 }
2373 if let Some(image_user) = &docker_config.config.image_user {
2374 return Ok(image_user.to_string());
2375 }
2376
2377 Ok("root".to_string())
2378}
2379
2380#[cfg(test)]
2381mod test {
2382 use std::{
2383 collections::HashMap,
2384 ffi::OsStr,
2385 path::PathBuf,
2386 process::{ExitStatus, Output},
2387 sync::{Arc, Mutex},
2388 };
2389
2390 use async_trait::async_trait;
2391 use fs::{FakeFs, Fs};
2392 use gpui::{AppContext, TestAppContext};
2393 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2394 use project::{
2395 ProjectEnvironment,
2396 worktree_store::{WorktreeIdCounter, WorktreeStore},
2397 };
2398 use serde_json_lenient::Value;
2399 use util::{command::Command, paths::SanitizedPath};
2400
2401 #[cfg(not(target_os = "windows"))]
2402 use crate::docker::DockerComposeServicePort;
2403 use crate::{
2404 DevContainerConfig, DevContainerContext,
2405 command_json::CommandRunner,
2406 devcontainer_api::DevContainerError,
2407 devcontainer_json::MountDefinition,
2408 devcontainer_manifest::{
2409 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2410 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2411 },
2412 docker::{
2413 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2414 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2415 DockerPs,
2416 },
2417 oci::TokenResponse,
2418 };
2419 const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2420
2421 async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2422 let buffer = futures::io::Cursor::new(Vec::new());
2423 let mut builder = async_tar::Builder::new(buffer);
2424 for (file_name, content) in content {
2425 if content.is_empty() {
2426 let mut header = async_tar::Header::new_gnu();
2427 header.set_size(0);
2428 header.set_mode(0o755);
2429 header.set_entry_type(async_tar::EntryType::Directory);
2430 header.set_cksum();
2431 builder
2432 .append_data(&mut header, file_name, &[] as &[u8])
2433 .await
2434 .unwrap();
2435 } else {
2436 let data = content.as_bytes();
2437 let mut header = async_tar::Header::new_gnu();
2438 header.set_size(data.len() as u64);
2439 header.set_mode(0o755);
2440 header.set_entry_type(async_tar::EntryType::Regular);
2441 header.set_cksum();
2442 builder
2443 .append_data(&mut header, file_name, data)
2444 .await
2445 .unwrap();
2446 }
2447 }
2448 let buffer = builder.into_inner().await.unwrap();
2449 buffer.into_inner()
2450 }
2451
2452 fn test_project_filename() -> String {
2453 PathBuf::from(TEST_PROJECT_PATH)
2454 .file_name()
2455 .expect("is valid")
2456 .display()
2457 .to_string()
2458 }
2459
    /// Writes `devcontainer_contents` to
    /// `<TEST_PROJECT_PATH>/.devcontainer/devcontainer.json` on the fake
    /// filesystem and returns `DevContainerConfig::default_config()`.
    async fn init_devcontainer_config(
        fs: &Arc<FakeFs>,
        devcontainer_contents: &str,
    ) -> DevContainerConfig {
        fs.insert_tree(
            format!("{TEST_PROJECT_PATH}/.devcontainer"),
            serde_json::json!({"devcontainer.json": devcontainer_contents}),
        )
        .await;

        DevContainerConfig::default_config()
    }
2472
    /// Handles to the fakes backing a `DevContainerManifest` under test, so
    /// individual tests can seed state or inspect recorded activity.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2479
2480 async fn init_default_devcontainer_manifest(
2481 cx: &mut TestAppContext,
2482 devcontainer_contents: &str,
2483 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2484 let fs = FakeFs::new(cx.executor());
2485 let http_client = fake_http_client();
2486 let command_runner = Arc::new(TestCommandRunner::new());
2487 let docker = Arc::new(FakeDocker::new());
2488 let environment = HashMap::new();
2489
2490 init_devcontainer_manifest(
2491 cx,
2492 fs,
2493 http_client,
2494 docker,
2495 command_runner,
2496 environment,
2497 devcontainer_contents,
2498 )
2499 .await
2500 }
2501
    /// Builds a `DevContainerManifest` against the supplied fakes: writes the
    /// devcontainer.json to the fake fs, assembles a `DevContainerContext`
    /// rooted at `TEST_PROJECT_PATH`, and returns the fakes alongside the
    /// manifest so tests can inspect recorded activity.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        // The context needs a minimal worktree store / project environment pair.
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep clones of the fakes so the caller can assert on them later.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2544
    /// An explicit `remoteUser` in devcontainer.json must take precedence over
    /// the `remoteUser` recorded in the image's metadata label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Competing metadata value that must NOT win.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2584
    /// With no `remoteUser` in devcontainer.json, the value from the image's
    /// metadata label is used instead.
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2613
2614 #[test]
2615 fn should_extract_feature_id_from_references() {
2616 assert_eq!(
2617 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2618 "aws-cli"
2619 );
2620 assert_eq!(
2621 extract_feature_id("ghcr.io/devcontainers/features/go"),
2622 "go"
2623 );
2624 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2625 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2626 assert_eq!(
2627 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2628 "rust"
2629 );
2630 }
2631
2632 #[gpui::test]
2633 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2634 let mut metadata = HashMap::new();
2635 metadata.insert(
2636 "remoteUser".to_string(),
2637 serde_json_lenient::Value::String("vsCode".to_string()),
2638 );
2639
2640 let (_, devcontainer_manifest) =
2641 init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2642 let build_resources = DockerBuildResources {
2643 image: DockerInspect {
2644 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2645 config: DockerInspectConfig {
2646 labels: DockerConfigLabels { metadata: None },
2647 image_user: None,
2648 env: Vec::new(),
2649 },
2650 mounts: None,
2651 state: None,
2652 },
2653 additional_mounts: vec![],
2654 privileged: false,
2655 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2656 };
2657 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2658
2659 assert!(docker_run_command.is_ok());
2660 let docker_run_command = docker_run_command.expect("ok");
2661
2662 assert_eq!(docker_run_command.get_program(), "docker");
2663 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2664 .join(".devcontainer")
2665 .join("devcontainer.json");
2666 let expected_config_file_label = expected_config_file_label.display();
2667 assert_eq!(
2668 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2669 vec![
2670 OsStr::new("run"),
2671 OsStr::new("--sig-proxy=false"),
2672 OsStr::new("-d"),
2673 OsStr::new("--mount"),
2674 OsStr::new(
2675 "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2676 ),
2677 OsStr::new("-l"),
2678 OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2679 OsStr::new("-l"),
2680 OsStr::new(&format!(
2681 "devcontainer.config_file={expected_config_file_label}"
2682 )),
2683 OsStr::new("--entrypoint"),
2684 OsStr::new("/bin/sh"),
2685 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2686 OsStr::new("-c"),
2687 OsStr::new(
2688 "
2689 echo Container started
2690 trap \"exit 0\" 15
2691 exec \"$@\"
2692 while sleep 1 & wait $!; do :; done
2693 "
2694 .trim()
2695 ),
2696 OsStr::new("-"),
2697 ]
2698 )
2699 }
2700
    /// `find_primary_service` must fail when the devcontainer declares no
    /// `service`, fail when the declared service is missing from the compose
    /// config, and succeed when the service exists.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2760
    /// End-to-end check of non-remote variable substitution when the config
    /// uses the default workspace mount: `${devcontainerId}`,
    /// `${containerWorkspaceFolder[Basename]}`,
    /// `${localWorkspaceFolder[Basename]}`, and `${localEnv:...}` must all be
    /// expanded inside `name` and `remoteEnv`.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
        "#;
        // Seed the host environment so `${localEnv:...}` lookups resolve.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2873
    /// Same variable-substitution check as the default-mount test, but with an
    /// explicit `workspaceMount`/`workspaceFolder`: the container-side
    /// variables must resolve against the custom folder while the local-side
    /// variables still resolve against the project path.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
        // These are some external comments. serde_lenient should handle them
        {
            // These are some internal comments
            "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
            "name": "myDevContainer-${devcontainerId}",
            "remoteUser": "root",
            "remoteEnv": {
                "DEVCONTAINER_ID": "${devcontainerId}",
                "MYVAR2": "myvarothervalue",
                "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

            },
            "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
            "workspaceFolder": "/workspace/customfolder"
        }
        "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — from the explicit workspaceFolder
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename} — still from the local project path
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2960
2961 // updateRemoteUserUID is treated as false in Windows, so this test will fail
2962 // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2963 #[cfg(not(target_os = "windows"))]
2964 #[gpui::test]
2965 async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2966 cx.executor().allow_parking();
2967 env_logger::try_init().ok();
2968 let given_devcontainer_contents = r#"
2969 /*---------------------------------------------------------------------------------------------
2970 * Copyright (c) Microsoft Corporation. All rights reserved.
2971 * Licensed under the MIT License. See License.txt in the project root for license information.
2972 *--------------------------------------------------------------------------------------------*/
2973 {
2974 "name": "cli-${devcontainerId}",
2975 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2976 "build": {
2977 "dockerfile": "Dockerfile",
2978 "args": {
2979 "VARIANT": "18-bookworm",
2980 "FOO": "bar",
2981 },
2982 },
2983 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2984 "workspaceFolder": "/workspace2",
2985 "mounts": [
2986 // Keep command history across instances
2987 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2988 ],
2989
2990 "forwardPorts": [
2991 8082,
2992 8083,
2993 ],
2994 "appPort": "8084",
2995
2996 "containerEnv": {
2997 "VARIABLE_VALUE": "value",
2998 },
2999
3000 "initializeCommand": "touch IAM.md",
3001
3002 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
3003
3004 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
3005
3006 "postCreateCommand": {
3007 "yarn": "yarn install",
3008 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3009 },
3010
3011 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
3012
3013 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
3014
3015 "remoteUser": "node",
3016
3017 "remoteEnv": {
3018 "PATH": "${containerEnv:PATH}:/some/other/path",
3019 "OTHER_ENV": "other_env_value"
3020 },
3021
3022 "features": {
3023 "ghcr.io/devcontainers/features/docker-in-docker:2": {
3024 "moby": false,
3025 },
3026 "ghcr.io/devcontainers/features/go:1": {},
3027 },
3028
3029 "customizations": {
3030 "vscode": {
3031 "extensions": [
3032 "dbaeumer.vscode-eslint",
3033 "GitHub.vscode-pull-request-github",
3034 ],
3035 },
3036 "zed": {
3037 "extensions": ["vue", "ruby"],
3038 },
3039 "codespaces": {
3040 "repositories": {
3041 "devcontainers/features": {
3042 "permissions": {
3043 "contents": "write",
3044 "workflows": "write",
3045 },
3046 },
3047 },
3048 },
3049 },
3050 }
3051 "#;
3052
3053 let (test_dependencies, mut devcontainer_manifest) =
3054 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3055 .await
3056 .unwrap();
3057
3058 test_dependencies
3059 .fs
3060 .atomic_write(
3061 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3062 r#"
3063# Copyright (c) Microsoft Corporation. All rights reserved.
3064# Licensed under the MIT License. See License.txt in the project root for license information.
3065ARG VARIANT="16-bullseye"
3066FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3067
3068RUN mkdir -p /workspaces && chown node:node /workspaces
3069
3070ARG USERNAME=node
3071USER $USERNAME
3072
3073# Save command line history
3074RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3075&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3076&& mkdir -p /home/$USERNAME/commandhistory \
3077&& touch /home/$USERNAME/commandhistory/.bash_history \
3078&& chown -R $USERNAME /home/$USERNAME/commandhistory
3079 "#.trim().to_string(),
3080 )
3081 .await
3082 .unwrap();
3083
3084 devcontainer_manifest.parse_nonremote_vars().unwrap();
3085
3086 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3087
3088 assert_eq!(
3089 devcontainer_up.extension_ids,
3090 vec!["vue".to_string(), "ruby".to_string()]
3091 );
3092
3093 let files = test_dependencies.fs.files();
3094 let feature_dockerfile = files
3095 .iter()
3096 .find(|f| {
3097 f.file_name()
3098 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3099 })
3100 .expect("to be found");
3101 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3102 assert_eq!(
3103 &feature_dockerfile,
3104 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3105
3106# Copyright (c) Microsoft Corporation. All rights reserved.
3107# Licensed under the MIT License. See License.txt in the project root for license information.
3108ARG VARIANT="16-bullseye"
3109FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3110
3111RUN mkdir -p /workspaces && chown node:node /workspaces
3112
3113ARG USERNAME=node
3114USER $USERNAME
3115
3116# Save command line history
3117RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3118&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3119&& mkdir -p /home/$USERNAME/commandhistory \
3120&& touch /home/$USERNAME/commandhistory/.bash_history \
3121&& chown -R $USERNAME /home/$USERNAME/commandhistory
3122
3123FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3124USER root
3125COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3126RUN chmod -R 0755 /tmp/build-features/
3127
3128FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3129
3130USER root
3131
3132RUN mkdir -p /tmp/dev-container-features
3133COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3134
3135RUN \
3136echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3137echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3138
3139
3140RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3141cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3142&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3143&& cd /tmp/dev-container-features/docker-in-docker_0 \
3144&& chmod +x ./devcontainer-features-install.sh \
3145&& ./devcontainer-features-install.sh \
3146&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3147
3148RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3149cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3150&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3151&& cd /tmp/dev-container-features/go_1 \
3152&& chmod +x ./devcontainer-features-install.sh \
3153&& ./devcontainer-features-install.sh \
3154&& rm -rf /tmp/dev-container-features/go_1
3155
3156
3157ARG _DEV_CONTAINERS_IMAGE_USER=root
3158USER $_DEV_CONTAINERS_IMAGE_USER
3159"#
3160 );
3161
3162 let uid_dockerfile = files
3163 .iter()
3164 .find(|f| {
3165 f.file_name()
3166 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3167 })
3168 .expect("to be found");
3169 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3170
3171 assert_eq!(
3172 &uid_dockerfile,
3173 r#"ARG BASE_IMAGE
3174FROM $BASE_IMAGE
3175
3176USER root
3177
3178ARG REMOTE_USER
3179ARG NEW_UID
3180ARG NEW_GID
3181SHELL ["/bin/sh", "-c"]
3182RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3183 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3184 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3185 if [ -z "$OLD_UID" ]; then \
3186 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3187 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3188 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3189 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3190 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3191 else \
3192 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3193 FREE_GID=65532; \
3194 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3195 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3196 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3197 fi; \
3198 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3199 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3200 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3201 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3202 fi; \
3203 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3204 fi;
3205
3206ARG IMAGE_USER
3207USER $IMAGE_USER
3208
3209# Ensure that /etc/profile does not clobber the existing path
3210RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3211
3212ENV DOCKER_BUILDKIT=1
3213
3214ENV GOPATH=/go
3215ENV GOROOT=/usr/local/go
3216ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3217ENV VARIABLE_VALUE=value
3218"#
3219 );
3220
3221 let golang_install_wrapper = files
3222 .iter()
3223 .find(|f| {
3224 f.file_name()
3225 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3226 && f.to_str().is_some_and(|s| s.contains("/go_"))
3227 })
3228 .expect("to be found");
3229 let golang_install_wrapper = test_dependencies
3230 .fs
3231 .load(golang_install_wrapper)
3232 .await
3233 .unwrap();
3234 assert_eq!(
3235 &golang_install_wrapper,
3236 r#"#!/bin/sh
3237set -e
3238
3239on_exit () {
3240 [ $? -eq 0 ] && exit
3241 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3242}
3243
3244trap on_exit EXIT
3245
3246echo ===========================================================================
3247echo 'Feature : go'
3248echo 'Id : ghcr.io/devcontainers/features/go:1'
3249echo 'Options :'
3250echo ' GOLANGCILINTVERSION=latest
3251 VERSION=latest'
3252echo ===========================================================================
3253
3254set -a
3255. ../devcontainer-features.builtin.env
3256. ./devcontainer-features.env
3257set +a
3258
3259chmod +x ./install.sh
3260./install.sh
3261"#
3262 );
3263
3264 let docker_commands = test_dependencies
3265 .command_runner
3266 .commands_by_program("docker");
3267
3268 let docker_run_command = docker_commands
3269 .iter()
3270 .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3271 .expect("found");
3272
3273 assert_eq!(
3274 docker_run_command.args,
3275 vec![
3276 "run".to_string(),
3277 "--privileged".to_string(),
3278 "--sig-proxy=false".to_string(),
3279 "-d".to_string(),
3280 "--mount".to_string(),
3281 "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3282 "--mount".to_string(),
3283 "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3284 "--mount".to_string(),
3285 "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3286 "-l".to_string(),
3287 "devcontainer.local_folder=/path/to/local/project".to_string(),
3288 "-l".to_string(),
3289 "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3290 "-l".to_string(),
3291 "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3292 "-p".to_string(),
3293 "8082:8082".to_string(),
3294 "-p".to_string(),
3295 "8083:8083".to_string(),
3296 "-p".to_string(),
3297 "8084:8084".to_string(),
3298 "--entrypoint".to_string(),
3299 "/bin/sh".to_string(),
3300 "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3301 "-c".to_string(),
3302 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3303 "-".to_string()
3304 ]
3305 );
3306
3307 let docker_exec_commands = test_dependencies
3308 .docker
3309 .exec_commands_recorded
3310 .lock()
3311 .unwrap();
3312
3313 assert!(docker_exec_commands.iter().all(|exec| {
3314 exec.env
3315 == HashMap::from([
3316 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3317 (
3318 "PATH".to_string(),
3319 "/initial/path:/some/other/path".to_string(),
3320 ),
3321 ])
3322 }))
3323 }
3324
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end test of a docker-compose based devcontainer with two OCI
        // features, forwarded ports (including ports qualified with another
        // service name, e.g. "db:5432"), and an appPort. Verifies the generated
        // Dockerfile.extended, updateUID.Dockerfile, and the
        // docker_compose_runtime.json override file.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose file defining two services: "app" (built from the Dockerfile
        // below, sharing db's network via network_mode) and "db" (postgres).
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
        "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Dockerfile used by the compose "app" service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The features build should emit an extended Dockerfile that appends the
        // feature install stages (aws-cli, docker-in-docker) to the user image.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // updateRemoteUserUID applies on non-Windows, so a UID-rewriting
        // Dockerfile must be generated for the compose case as well.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // A runtime compose-override file should be written instead of passing
        // `docker run` flags; verify its full contents below.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        // Expected override: "app" gets the keep-alive entrypoint, privileged
        // mode / caps (from docker-in-docker), metadata labels, and the
        // feature-injected docker volume. All published ports land on "db"
        // because "app" uses `network_mode: service:db` in the compose file.
        // NOTE(review): the "42dad4b4ca7b8ced" suffix is presumably a hash
        // derived from the project config — confirm it is stable across
        // hasher/platform changes.
        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "8084".to_string(),
                                published: "8084".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3630
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        // Same compose scenario as test_spawns_devcontainer_with_docker_compose,
        // but with "updateRemoteUserUID": false — so no updateUID.Dockerfile is
        // involved and the profile-PATH / DOCKER_BUILDKIT lines are appended to
        // Dockerfile.extended itself. Runs on all platforms (no cfg guard),
        // covering the Windows behavior noted on the sibling test.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "updateRemoteUserUID": false,
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // NOTE(review): unlike the sibling test, this compose fixture's nesting
        // is flattened — "app:"/"db:" are not indented under "services:" and
        // "postgres-data:" is not indented under "volumes:". Confirm this is
        // intentional (e.g. exercising lenient parsing) rather than a mangled
        // fixture.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
  build:
    context: .
    dockerfile: Dockerfile
  env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

  volumes:
    - ../..:/workspaces:cached

  # Overrides default command so things don't shut down after the process ends.
  command: sleep infinity

  # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
  network_mode: service:db

  # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
  # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
  image: postgres:14.1
  restart: unless-stopped
  volumes:
    - postgres-data:/var/lib/postgresql/data
  env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

  # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
  # (Adding the "ports" property to this file will not forward from a Codespace.)
        "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Dockerfile used by the compose "app" service's build section.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // With updateRemoteUserUID disabled, the trailing /etc/profile PATH fix
        // and ENV DOCKER_BUILDKIT=1 appear at the end of Dockerfile.extended
        // (rather than in a separate updateUID.Dockerfile).
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
3806
3807 #[cfg(not(target_os = "windows"))]
3808 #[gpui::test]
3809 async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
3810 cx.executor().allow_parking();
3811 env_logger::try_init().ok();
3812 let given_devcontainer_contents = r#"
3813 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3814 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3815 {
3816 "features": {
3817 "ghcr.io/devcontainers/features/aws-cli:1": {},
3818 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3819 },
3820 "name": "Rust and PostgreSQL",
3821 "dockerComposeFile": "docker-compose.yml",
3822 "service": "app",
3823 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3824
3825 // Features to add to the dev container. More info: https://containers.dev/features.
3826 // "features": {},
3827
3828 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3829 // "forwardPorts": [5432],
3830
3831 // Use 'postCreateCommand' to run commands after the container is created.
3832 // "postCreateCommand": "rustc --version",
3833
3834 // Configure tool-specific properties.
3835 // "customizations": {},
3836
3837 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3838 // "remoteUser": "root"
3839 }
3840 "#;
3841 let mut fake_docker = FakeDocker::new();
3842 fake_docker.set_podman(true);
3843 let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
3844 cx,
3845 FakeFs::new(cx.executor()),
3846 fake_http_client(),
3847 Arc::new(fake_docker),
3848 Arc::new(TestCommandRunner::new()),
3849 HashMap::new(),
3850 given_devcontainer_contents,
3851 )
3852 .await
3853 .unwrap();
3854
3855 test_dependencies
3856 .fs
3857 .atomic_write(
3858 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3859 r#"
3860version: '3.8'
3861
3862volumes:
3863postgres-data:
3864
3865services:
3866app:
3867build:
3868 context: .
3869 dockerfile: Dockerfile
3870env_file:
3871 # Ensure that the variables in .env match the same variables in devcontainer.json
3872 - .env
3873
3874volumes:
3875 - ../..:/workspaces:cached
3876
3877# Overrides default command so things don't shut down after the process ends.
3878command: sleep infinity
3879
3880# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3881network_mode: service:db
3882
3883# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3884# (Adding the "ports" property to this file will not forward from a Codespace.)
3885
3886db:
3887image: postgres:14.1
3888restart: unless-stopped
3889volumes:
3890 - postgres-data:/var/lib/postgresql/data
3891env_file:
3892 # Ensure that the variables in .env match the same variables in devcontainer.json
3893 - .env
3894
3895# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3896# (Adding the "ports" property to this file will not forward from a Codespace.)
3897 "#.trim().to_string(),
3898 )
3899 .await
3900 .unwrap();
3901
3902 test_dependencies.fs.atomic_write(
3903 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3904 r#"
3905FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3906
3907# Include lld linker to improve build times either by using environment variable
3908# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3909RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3910&& apt-get -y install clang lld \
3911&& apt-get autoremove -y && apt-get clean -y
3912 "#.trim().to_string()).await.unwrap();
3913
3914 devcontainer_manifest.parse_nonremote_vars().unwrap();
3915
3916 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3917
3918 let files = test_dependencies.fs.files();
3919
3920 let feature_dockerfile = files
3921 .iter()
3922 .find(|f| {
3923 f.file_name()
3924 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3925 })
3926 .expect("to be found");
3927 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3928 assert_eq!(
3929 &feature_dockerfile,
3930 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3931
3932FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3933
3934# Include lld linker to improve build times either by using environment variable
3935# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3936RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3937&& apt-get -y install clang lld \
3938&& apt-get autoremove -y && apt-get clean -y
3939
3940FROM dev_container_feature_content_temp as dev_containers_feature_content_source
3941
3942FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3943USER root
3944COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
3945RUN chmod -R 0755 /tmp/build-features/
3946
3947FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3948
3949USER root
3950
3951RUN mkdir -p /tmp/dev-container-features
3952COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3953
3954RUN \
3955echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3956echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3957
3958
3959COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
3960RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3961&& cd /tmp/dev-container-features/aws-cli_0 \
3962&& chmod +x ./devcontainer-features-install.sh \
3963&& ./devcontainer-features-install.sh
3964
3965COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
3966RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3967&& cd /tmp/dev-container-features/docker-in-docker_1 \
3968&& chmod +x ./devcontainer-features-install.sh \
3969&& ./devcontainer-features-install.sh
3970
3971
3972ARG _DEV_CONTAINERS_IMAGE_USER=root
3973USER $_DEV_CONTAINERS_IMAGE_USER
3974"#
3975 );
3976
3977 let uid_dockerfile = files
3978 .iter()
3979 .find(|f| {
3980 f.file_name()
3981 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3982 })
3983 .expect("to be found");
3984 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3985
3986 assert_eq!(
3987 &uid_dockerfile,
3988 r#"ARG BASE_IMAGE
3989FROM $BASE_IMAGE
3990
3991USER root
3992
3993ARG REMOTE_USER
3994ARG NEW_UID
3995ARG NEW_GID
3996SHELL ["/bin/sh", "-c"]
3997RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3998 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3999 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
4000 if [ -z "$OLD_UID" ]; then \
4001 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
4002 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
4003 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
4004 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
4005 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
4006 else \
4007 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
4008 FREE_GID=65532; \
4009 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
4010 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
4011 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
4012 fi; \
4013 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
4014 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
4015 if [ "$OLD_GID" != "$NEW_GID" ]; then \
4016 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
4017 fi; \
4018 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
4019 fi;
4020
4021ARG IMAGE_USER
4022USER $IMAGE_USER
4023
4024# Ensure that /etc/profile does not clobber the existing path
4025RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4026
4027
4028ENV DOCKER_BUILDKIT=1
4029"#
4030 );
4031 }
4032
// Exercises the full build flow for a Dockerfile-based devcontainer that opts out of
// remote-user UID syncing (`"updateRemoteUserUID": false`): features are baked into a
// generated "Dockerfile.extended", per-feature install wrappers are emitted, the `zed`
// customization's extensions are surfaced on the up-result, and `remoteEnv` (with
// `${containerEnv:PATH}` substituted from the inspected container) reaches every exec.
#[gpui::test]
async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
    cx.executor().allow_parking();
    env_logger::try_init().ok();
    // devcontainer.json fixture: JSONC (comments + trailing commas) exercising variable
    // substitution, build args, mounts, ports, lifecycle commands, two OCI features,
    // and per-editor customizations.
    let given_devcontainer_contents = r#"
 /*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
 {
 "name": "cli-${devcontainerId}",
 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
 "build": {
 "dockerfile": "Dockerfile",
 "args": {
 "VARIANT": "18-bookworm",
 "FOO": "bar",
 },
 },
 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
 "workspaceFolder": "/workspace2",
 "mounts": [
 // Keep command history across instances
 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
 ],

 "forwardPorts": [
 8082,
 8083,
 ],
 "appPort": "8084",
 "updateRemoteUserUID": false,

 "containerEnv": {
 "VARIABLE_VALUE": "value",
 },

 "initializeCommand": "touch IAM.md",

 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

 "postCreateCommand": {
 "yarn": "yarn install",
 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
 },

 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

 "remoteUser": "node",

 "remoteEnv": {
 "PATH": "${containerEnv:PATH}:/some/other/path",
 "OTHER_ENV": "other_env_value"
 },

 "features": {
 "ghcr.io/devcontainers/features/docker-in-docker:2": {
 "moby": false,
 },
 "ghcr.io/devcontainers/features/go:1": {},
 },

 "customizations": {
 "vscode": {
 "extensions": [
 "dbaeumer.vscode-eslint",
 "GitHub.vscode-pull-request-github",
 ],
 },
 "zed": {
 "extensions": ["vue", "ruby"],
 },
 "codespaces": {
 "repositories": {
 "devcontainers/features": {
 "permissions": {
 "contents": "write",
 "workflows": "write",
 },
 },
 },
 },
 },
 }
 "#;

    // Default fakes: FakeDocker (docker mode), fake HTTP client for OCI feature
    // downloads, and the recording command runner.
    let (test_dependencies, mut devcontainer_manifest) =
        init_default_devcontainer_manifest(cx, given_devcontainer_contents)
            .await
            .unwrap();

    // Base Dockerfile that the extended (feature-enriched) Dockerfile is derived from.
    test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
 "#.trim().to_string(),
        )
        .await
        .unwrap();

    // Resolve the non-remote variable substitutions (e.g. ${localWorkspaceFolder}).
    devcontainer_manifest.parse_nonremote_vars().unwrap();

    let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

    // Only the `zed` customization's extensions should be reported; the vscode and
    // codespaces sections are ignored.
    assert_eq!(
        devcontainer_up.extension_ids,
        vec!["vue".to_string(), "ruby".to_string()]
    );

    let files = test_dependencies.fs.files();
    // The feature build writes a derived "Dockerfile.extended" next to the fixture.
    let feature_dockerfile = files
        .iter()
        .find(|f| {
            f.file_name()
                .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
        })
        .expect("to be found");
    let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
    // BuildKit-capable path (FakeDocker defaults to docker, not podman): feature
    // content is installed through `RUN --mount=type=bind` steps rather than COPY'd
    // layers, and feature-declared containerEnv/custom env land as trailing ENVs.
    assert_eq!(
        &feature_dockerfile,
        r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
    );

    // The per-feature install wrapper for the `go` feature embeds the feature's
    // identity and its resolved option defaults.
    let golang_install_wrapper = files
        .iter()
        .find(|f| {
            f.file_name()
                .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                && f.to_str().is_some_and(|s| s.contains("go_"))
        })
        .expect("to be found");
    let golang_install_wrapper = test_dependencies
        .fs
        .load(golang_install_wrapper)
        .await
        .unwrap();
    assert_eq!(
        &golang_install_wrapper,
        r#"#!/bin/sh
set -e

on_exit () {
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo ' GOLANGCILINTVERSION=latest
 VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    // The container itself must have been started through a `docker run` invocation
    // recorded by the command runner.
    let docker_commands = test_dependencies
        .command_runner
        .commands_by_program("docker");

    let docker_run_command = docker_commands
        .iter()
        .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

    assert!(docker_run_command.is_some());

    let docker_exec_commands = test_dependencies
        .docker
        .exec_commands_recorded
        .lock()
        .unwrap();

    // Every recorded exec carries the configured remoteEnv; PATH is the container's
    // inspected PATH ("/initial/path", from the FakeDocker cli_* fixture) with the
    // remoteEnv suffix appended.
    assert!(docker_exec_commands.iter().all(|exec| {
        exec.env
            == HashMap::from([
                ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                (
                    "PATH".to_string(),
                    "/initial/path:/some/other/path".to_string(),
                ),
            ])
    }))
}
4312
// Verifies that a devcontainer referencing a prebuilt image (no `build` section, no
// features) still goes through the remote-user UID/GID sync step: an
// "updateUID.Dockerfile" is generated from the canned inspect data for
// `test_image:latest`.
#[cfg(not(target_os = "windows"))]
#[gpui::test]
async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
    cx.executor().allow_parking();
    env_logger::try_init().ok();
    // Minimal JSONC config: a name using ${devcontainerId} substitution and a
    // plain image reference.
    let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "image": "test_image:latest",
 }
 "#;

    let (test_dependencies, mut devcontainer_manifest) =
        init_default_devcontainer_manifest(cx, given_devcontainer_contents)
            .await
            .unwrap();

    devcontainer_manifest.parse_nonremote_vars().unwrap();

    let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

    // The UID-sync Dockerfile must have been written out ...
    let files = test_dependencies.fs.files();
    let uid_dockerfile = files
        .iter()
        .find(|f| {
            f.file_name()
                .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
        })
        .expect("to be found");
    let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

    // ... and match the canonical remap script: rewrite the remote user's UID/GID in
    // /etc/passwd and /etc/group (relocating a clashing group to a free GID first),
    // chown the user's home folder, then restore the image's original user.
    assert_eq!(
        &uid_dockerfile,
        r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
    );
}
4387
// Verifies that a compose-managed service whose image is prebuilt (no `build:`
// section in the compose file) still gets the remote-user UID sync step and the
// standard updateUID.Dockerfile. The compose file contents are matched by path in
// FakeDocker::get_docker_compose_config, which returns the parsed fixture.
#[cfg(not(target_os = "windows"))]
#[gpui::test]
async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
    cx.executor().allow_parking();
    env_logger::try_init().ok();
    // Config pointing at a compose file and selecting the "app" service.
    let given_devcontainer_contents = r#"
 {
 "name": "cli-${devcontainerId}",
 "dockerComposeFile": "docker-compose-plain.yml",
 "service": "app",
 }
 "#;

    let (test_dependencies, mut devcontainer_manifest) =
        init_default_devcontainer_manifest(cx, given_devcontainer_contents)
            .await
            .unwrap();

    // Compose fixture with a single prebuilt-image service; FakeDocker keys its
    // canned parse result off this exact path.
    test_dependencies
        .fs
        .atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
            r#"
services:
 app:
 image: test_image:latest
 command: sleep infinity
 volumes:
 - ..:/workspace:cached
 "#
            .trim()
            .to_string(),
        )
        .await
        .unwrap();

    devcontainer_manifest.parse_nonremote_vars().unwrap();

    let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

    // The UID-sync Dockerfile must be generated for the compose service's image ...
    let files = test_dependencies.fs.files();
    let uid_dockerfile = files
        .iter()
        .find(|f| {
            f.file_name()
                .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
        })
        .expect("to be found");
    let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

    // ... and match the same canonical UID/GID remap script used in the plain-image
    // (non-compose) case.
    assert_eq!(
        &uid_dockerfile,
        r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
 if [ -z "$OLD_UID" ]; then \
 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
 else \
 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
 FREE_GID=65532; \
 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
 fi; \
 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
 if [ "$OLD_GID" != "$NEW_GID" ]; then \
 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
 fi; \
 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
 fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
    );
}
4481
/// Arguments captured from a single `FakeDocker::run_docker_exec` call so tests
/// can assert on what would have been executed inside the container.
pub(crate) struct RecordedExecCommand {
    // `_`-prefixed fields are captured for completeness but not currently
    // asserted on by any test.
    pub(crate) _container_id: String,
    pub(crate) _remote_folder: String,
    pub(crate) _user: String,
    // Environment the exec would run with; asserted against remoteEnv handling.
    pub(crate) env: HashMap<String, String>,
    pub(crate) _inner_command: Command,
}
4489
/// Test double for `DockerClient` that serves canned inspect/compose data and
/// records exec invocations instead of talking to a real daemon.
pub(crate) struct FakeDocker {
    // Every run_docker_exec invocation, in call order.
    exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
    // When true, the fake reports itself as podman (different CLI name, no
    // compose BuildKit support).
    podman: bool,
}
4494
4495 impl FakeDocker {
4496 pub(crate) fn new() -> Self {
4497 Self {
4498 podman: false,
4499 exec_commands_recorded: Mutex::new(Vec::new()),
4500 }
4501 }
4502 #[cfg(not(target_os = "windows"))]
4503 fn set_podman(&mut self, podman: bool) {
4504 self.podman = podman;
4505 }
4506 }
4507
4508 #[async_trait]
4509 impl DockerClient for FakeDocker {
4510 async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
4511 if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
4512 return Ok(DockerInspect {
4513 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4514 .to_string(),
4515 config: DockerInspectConfig {
4516 labels: DockerConfigLabels {
4517 metadata: Some(vec![HashMap::from([(
4518 "remoteUser".to_string(),
4519 Value::String("node".to_string()),
4520 )])]),
4521 },
4522 env: Vec::new(),
4523 image_user: Some("root".to_string()),
4524 },
4525 mounts: None,
4526 state: None,
4527 });
4528 }
4529 if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
4530 return Ok(DockerInspect {
4531 id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
4532 .to_string(),
4533 config: DockerInspectConfig {
4534 labels: DockerConfigLabels {
4535 metadata: Some(vec![HashMap::from([(
4536 "remoteUser".to_string(),
4537 Value::String("vscode".to_string()),
4538 )])]),
4539 },
4540 image_user: Some("root".to_string()),
4541 env: Vec::new(),
4542 },
4543 mounts: None,
4544 state: None,
4545 });
4546 }
4547 if id.starts_with("cli_") {
4548 return Ok(DockerInspect {
4549 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4550 .to_string(),
4551 config: DockerInspectConfig {
4552 labels: DockerConfigLabels {
4553 metadata: Some(vec![HashMap::from([(
4554 "remoteUser".to_string(),
4555 Value::String("node".to_string()),
4556 )])]),
4557 },
4558 image_user: Some("root".to_string()),
4559 env: vec!["PATH=/initial/path".to_string()],
4560 },
4561 mounts: None,
4562 state: None,
4563 });
4564 }
4565 if id == "found_docker_ps" {
4566 return Ok(DockerInspect {
4567 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
4568 .to_string(),
4569 config: DockerInspectConfig {
4570 labels: DockerConfigLabels {
4571 metadata: Some(vec![HashMap::from([(
4572 "remoteUser".to_string(),
4573 Value::String("node".to_string()),
4574 )])]),
4575 },
4576 image_user: Some("root".to_string()),
4577 env: vec!["PATH=/initial/path".to_string()],
4578 },
4579 mounts: Some(vec![DockerInspectMount {
4580 source: "/path/to/local/project".to_string(),
4581 destination: "/workspaces/project".to_string(),
4582 }]),
4583 state: None,
4584 });
4585 }
4586 if id.starts_with("rust_a-") {
4587 return Ok(DockerInspect {
4588 id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
4589 .to_string(),
4590 config: DockerInspectConfig {
4591 labels: DockerConfigLabels {
4592 metadata: Some(vec![HashMap::from([(
4593 "remoteUser".to_string(),
4594 Value::String("vscode".to_string()),
4595 )])]),
4596 },
4597 image_user: Some("root".to_string()),
4598 env: Vec::new(),
4599 },
4600 mounts: None,
4601 state: None,
4602 });
4603 }
4604 if id == "test_image:latest" {
4605 return Ok(DockerInspect {
4606 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
4607 .to_string(),
4608 config: DockerInspectConfig {
4609 labels: DockerConfigLabels {
4610 metadata: Some(vec![HashMap::from([(
4611 "remoteUser".to_string(),
4612 Value::String("node".to_string()),
4613 )])]),
4614 },
4615 env: Vec::new(),
4616 image_user: Some("root".to_string()),
4617 },
4618 mounts: None,
4619 state: None,
4620 });
4621 }
4622
4623 Err(DevContainerError::DockerNotAvailable)
4624 }
4625 async fn get_docker_compose_config(
4626 &self,
4627 config_files: &Vec<PathBuf>,
4628 ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
4629 if config_files.len() == 1
4630 && config_files.get(0)
4631 == Some(&PathBuf::from(
4632 "/path/to/local/project/.devcontainer/docker-compose.yml",
4633 ))
4634 {
4635 return Ok(Some(DockerComposeConfig {
4636 name: None,
4637 services: HashMap::from([
4638 (
4639 "app".to_string(),
4640 DockerComposeService {
4641 build: Some(DockerComposeServiceBuild {
4642 context: Some(".".to_string()),
4643 dockerfile: Some("Dockerfile".to_string()),
4644 args: None,
4645 additional_contexts: None,
4646 }),
4647 volumes: vec![MountDefinition {
4648 source: Some("../..".to_string()),
4649 target: "/workspaces".to_string(),
4650 mount_type: Some("bind".to_string()),
4651 }],
4652 network_mode: Some("service:db".to_string()),
4653 ..Default::default()
4654 },
4655 ),
4656 (
4657 "db".to_string(),
4658 DockerComposeService {
4659 image: Some("postgres:14.1".to_string()),
4660 volumes: vec![MountDefinition {
4661 source: Some("postgres-data".to_string()),
4662 target: "/var/lib/postgresql/data".to_string(),
4663 mount_type: Some("volume".to_string()),
4664 }],
4665 env_file: Some(vec![".env".to_string()]),
4666 ..Default::default()
4667 },
4668 ),
4669 ]),
4670 volumes: HashMap::from([(
4671 "postgres-data".to_string(),
4672 DockerComposeVolume::default(),
4673 )]),
4674 }));
4675 }
4676 if config_files.len() == 1
4677 && config_files.get(0)
4678 == Some(&PathBuf::from(
4679 "/path/to/local/project/.devcontainer/docker-compose-plain.yml",
4680 ))
4681 {
4682 return Ok(Some(DockerComposeConfig {
4683 name: None,
4684 services: HashMap::from([(
4685 "app".to_string(),
4686 DockerComposeService {
4687 image: Some("test_image:latest".to_string()),
4688 command: vec!["sleep".to_string(), "infinity".to_string()],
4689 ..Default::default()
4690 },
4691 )]),
4692 ..Default::default()
4693 }));
4694 }
4695 Err(DevContainerError::DockerNotAvailable)
4696 }
4697 async fn docker_compose_build(
4698 &self,
4699 _config_files: &Vec<PathBuf>,
4700 _project_name: &str,
4701 ) -> Result<(), DevContainerError> {
4702 Ok(())
4703 }
4704 async fn run_docker_exec(
4705 &self,
4706 container_id: &str,
4707 remote_folder: &str,
4708 user: &str,
4709 env: &HashMap<String, String>,
4710 inner_command: Command,
4711 ) -> Result<(), DevContainerError> {
4712 let mut record = self
4713 .exec_commands_recorded
4714 .lock()
4715 .expect("should be available");
4716 record.push(RecordedExecCommand {
4717 _container_id: container_id.to_string(),
4718 _remote_folder: remote_folder.to_string(),
4719 _user: user.to_string(),
4720 env: env.clone(),
4721 _inner_command: inner_command,
4722 });
4723 Ok(())
4724 }
4725 async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
4726 Err(DevContainerError::DockerNotAvailable)
4727 }
4728 async fn find_process_by_filters(
4729 &self,
4730 _filters: Vec<String>,
4731 ) -> Result<Option<DockerPs>, DevContainerError> {
4732 Ok(Some(DockerPs {
4733 id: "found_docker_ps".to_string(),
4734 }))
4735 }
4736 fn supports_compose_buildkit(&self) -> bool {
4737 !self.podman
4738 }
4739 fn docker_cli(&self) -> String {
4740 if self.podman {
4741 "podman".to_string()
4742 } else {
4743 "docker".to_string()
4744 }
4745 }
4746 }
4747
/// A single command invocation (program + argument list) captured by
/// `TestCommandRunner`.
#[derive(Debug, Clone)]
pub(crate) struct TestCommand {
    pub(crate) program: String,
    pub(crate) args: Vec<String>,
}
4753
/// `CommandRunner` test double that records commands instead of spawning them.
pub(crate) struct TestCommandRunner {
    // All commands passed to run_command, in call order.
    commands_recorded: Mutex<Vec<TestCommand>>,
}
4757
4758 impl TestCommandRunner {
4759 fn new() -> Self {
4760 Self {
4761 commands_recorded: Mutex::new(Vec::new()),
4762 }
4763 }
4764
4765 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4766 let record = self.commands_recorded.lock().expect("poisoned");
4767 record
4768 .iter()
4769 .filter(|r| r.program == program)
4770 .map(|r| r.clone())
4771 .collect()
4772 }
4773 }
4774
4775 #[async_trait]
4776 impl CommandRunner for TestCommandRunner {
4777 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4778 let mut record = self.commands_recorded.lock().expect("poisoned");
4779
4780 record.push(TestCommand {
4781 program: command.get_program().display().to_string(),
4782 args: command
4783 .get_args()
4784 .map(|a| a.display().to_string())
4785 .collect(),
4786 });
4787
4788 Ok(Output {
4789 status: ExitStatus::default(),
4790 stdout: vec![],
4791 stderr: vec![],
4792 })
4793 }
4794 }
4795
4796 fn fake_http_client() -> Arc<dyn HttpClient> {
4797 FakeHttpClient::create(|request| async move {
4798 let (parts, _body) = request.into_parts();
4799 if parts.uri.path() == "/token" {
4800 let token_response = TokenResponse {
4801 token: "token".to_string(),
4802 };
4803 return Ok(http::Response::builder()
4804 .status(200)
4805 .body(http_client::AsyncBody::from(
4806 serde_json_lenient::to_string(&token_response).unwrap(),
4807 ))
4808 .unwrap());
4809 }
4810
4811 // OCI specific things
4812 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4813 let response = r#"
4814 {
4815 "schemaVersion": 2,
4816 "mediaType": "application/vnd.oci.image.manifest.v1+json",
4817 "config": {
4818 "mediaType": "application/vnd.devcontainers",
4819 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4820 "size": 2
4821 },
4822 "layers": [
4823 {
4824 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4825 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4826 "size": 59392,
4827 "annotations": {
4828 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4829 }
4830 }
4831 ],
4832 "annotations": {
4833 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4834 "com.github.package.type": "devcontainer_feature"
4835 }
4836 }
4837 "#;
4838 return Ok(http::Response::builder()
4839 .status(200)
4840 .body(http_client::AsyncBody::from(response))
4841 .unwrap());
4842 }
4843
4844 if parts.uri.path()
4845 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4846 {
4847 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4852 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4853 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4854 ```
4855 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4856 ```
4857 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4858
4859
4860 ## OS Support
4861
4862 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4863
4864 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4865
4866 `bash` is required to execute the `install.sh` script."#),
4867 ("./README.md", r#"
4868 # Docker (Docker-in-Docker) (docker-in-docker)
4869
4870 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4871
4872 ## Example Usage
4873
4874 ```json
4875 "features": {
4876 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4877 }
4878 ```
4879
4880 ## Options
4881
4882 | Options Id | Description | Type | Default Value |
4883 |-----|-----|-----|-----|
4884 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4885 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4886 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4887 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4888 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4889 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4890 | installDockerBuildx | Install Docker Buildx | boolean | true |
4891 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4892 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4893
4894 ## Customizations
4895
4896 ### VS Code Extensions
4897
4898 - `ms-azuretools.vscode-containers`
4899
4900 ## Limitations
4901
4902 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4903 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4904 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4905 ```
4906 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4907 ```
4908 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4909
4910
4911 ## OS Support
4912
4913 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4914
4915 `bash` is required to execute the `install.sh` script.
4916
4917
4918 ---
4919
4920 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
4921 ("./devcontainer-feature.json", r#"
4922 {
4923 "id": "docker-in-docker",
4924 "version": "2.16.1",
4925 "name": "Docker (Docker-in-Docker)",
4926 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4927 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4928 "options": {
4929 "version": {
4930 "type": "string",
4931 "proposals": [
4932 "latest",
4933 "none",
4934 "20.10"
4935 ],
4936 "default": "latest",
4937 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4938 },
4939 "moby": {
4940 "type": "boolean",
4941 "default": true,
4942 "description": "Install OSS Moby build instead of Docker CE"
4943 },
4944 "mobyBuildxVersion": {
4945 "type": "string",
4946 "default": "latest",
4947 "description": "Install a specific version of moby-buildx when using Moby"
4948 },
4949 "dockerDashComposeVersion": {
4950 "type": "string",
4951 "enum": [
4952 "none",
4953 "v1",
4954 "v2"
4955 ],
4956 "default": "v2",
4957 "description": "Default version of Docker Compose (v1, v2 or none)"
4958 },
4959 "azureDnsAutoDetection": {
4960 "type": "boolean",
4961 "default": true,
4962 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4963 },
4964 "dockerDefaultAddressPool": {
4965 "type": "string",
4966 "default": "",
4967 "proposals": [],
4968 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4969 },
4970 "installDockerBuildx": {
4971 "type": "boolean",
4972 "default": true,
4973 "description": "Install Docker Buildx"
4974 },
4975 "installDockerComposeSwitch": {
4976 "type": "boolean",
4977 "default": false,
4978 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4979 },
4980 "disableIp6tables": {
4981 "type": "boolean",
4982 "default": false,
4983 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4984 }
4985 },
4986 "entrypoint": "/usr/local/share/docker-init.sh",
4987 "privileged": true,
4988 "containerEnv": {
4989 "DOCKER_BUILDKIT": "1"
4990 },
4991 "customizations": {
4992 "vscode": {
4993 "extensions": [
4994 "ms-azuretools.vscode-containers"
4995 ],
4996 "settings": {
4997 "github.copilot.chat.codeGeneration.instructions": [
4998 {
4999 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5000 }
5001 ]
5002 }
5003 }
5004 },
5005 "mounts": [
5006 {
5007 "source": "dind-var-lib-docker-${devcontainerId}",
5008 "target": "/var/lib/docker",
5009 "type": "volume"
5010 }
5011 ],
5012 "installsAfter": [
5013 "ghcr.io/devcontainers/features/common-utils"
5014 ]
5015 }"#),
5016 ("./install.sh", r#"
5017 #!/usr/bin/env bash
5018 #-------------------------------------------------------------------------------------------------------------
5019 # Copyright (c) Microsoft Corporation. All rights reserved.
5020 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5021 #-------------------------------------------------------------------------------------------------------------
5022 #
5023 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5024 # Maintainer: The Dev Container spec maintainers
5025
5026
5027 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5028 USE_MOBY="${MOBY:-"true"}"
5029 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5030 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5031 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5032 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5033 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5034 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5035 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5036 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5037 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5038 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5039 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5040 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5041
5042 # Default: Exit on any failure.
5043 set -e
5044
5045 # Clean up
5046 rm -rf /var/lib/apt/lists/*
5047
5048 # Setup STDERR.
5049 err() {
5050 echo "(!) $*" >&2
5051 }
5052
5053 if [ "$(id -u)" -ne 0 ]; then
5054 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5055 exit 1
5056 fi
5057
5058 ###################
5059 # Helper Functions
5060 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5061 ###################
5062
5063 # Determine the appropriate non-root user
5064 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5065 USERNAME=""
5066 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5067 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5068 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5069 USERNAME=${CURRENT_USER}
5070 break
5071 fi
5072 done
5073 if [ "${USERNAME}" = "" ]; then
5074 USERNAME=root
5075 fi
5076 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5077 USERNAME=root
5078 fi
5079
5080 # Package manager update function
5081 pkg_mgr_update() {
5082 case ${ADJUSTED_ID} in
5083 debian)
5084 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5085 echo "Running apt-get update..."
5086 apt-get update -y
5087 fi
5088 ;;
5089 rhel)
5090 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
5091 cache_check_dir="/var/cache/yum"
5092 else
5093 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
5094 fi
5095 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
5096 echo "Running ${PKG_MGR_CMD} makecache ..."
5097 ${PKG_MGR_CMD} makecache
5098 fi
5099 ;;
5100 esac
5101 }
5102
5103 # Checks if packages are installed and installs them if not
5104 check_packages() {
5105 case ${ADJUSTED_ID} in
5106 debian)
5107 if ! dpkg -s "$@" > /dev/null 2>&1; then
5108 pkg_mgr_update
5109 apt-get -y install --no-install-recommends "$@"
5110 fi
5111 ;;
5112 rhel)
5113 if ! rpm -q "$@" > /dev/null 2>&1; then
5114 pkg_mgr_update
5115 ${PKG_MGR_CMD} -y install "$@"
5116 fi
5117 ;;
5118 esac
5119 }
5120
5121 # Figure out correct version of a three part version number is not passed
5122 find_version_from_git_tags() {
5123 local variable_name=$1
5124 local requested_version=${!variable_name}
5125 if [ "${requested_version}" = "none" ]; then return; fi
5126 local repository=$2
5127 local prefix=${3:-"tags/v"}
5128 local separator=${4:-"."}
5129 local last_part_optional=${5:-"false"}
5130 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5131 local escaped_separator=${separator//./\\.}
5132 local last_part
5133 if [ "${last_part_optional}" = "true" ]; then
5134 last_part="(${escaped_separator}[0-9]+)?"
5135 else
5136 last_part="${escaped_separator}[0-9]+"
5137 fi
5138 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5139 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5140 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5141 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5142 else
5143 set +e
5144 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5145 set -e
5146 fi
5147 fi
5148 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5149 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5150 exit 1
5151 fi
5152 echo "${variable_name}=${!variable_name}"
5153 }
5154
5155 # Use semver logic to decrement a version number then look for the closest match
5156 find_prev_version_from_git_tags() {
5157 local variable_name=$1
5158 local current_version=${!variable_name}
5159 local repository=$2
5160 # Normally a "v" is used before the version number, but support alternate cases
5161 local prefix=${3:-"tags/v"}
5162 # Some repositories use "_" instead of "." for version number part separation, support that
5163 local separator=${4:-"."}
5164 # Some tools release versions that omit the last digit (e.g. go)
5165 local last_part_optional=${5:-"false"}
5166 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
5167 local version_suffix_regex=$6
5168 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
5169 set +e
5170 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
5171 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
5172 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
5173
5174 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
5175 ((major=major-1))
5176 declare -g ${variable_name}="${major}"
5177 # Look for latest version from previous major release
5178 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5179 # Handle situations like Go's odd version pattern where "0" releases omit the last part
5180 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
5181 ((minor=minor-1))
5182 declare -g ${variable_name}="${major}.${minor}"
5183 # Look for latest version from previous minor release
5184 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
5185 else
5186 ((breakfix=breakfix-1))
5187 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
5188 declare -g ${variable_name}="${major}.${minor}"
5189 else
5190 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
5191 fi
5192 fi
5193 set -e
5194 }
5195
5196 # Function to fetch the version released prior to the latest version
5197 get_previous_version() {
5198 local url=$1
5199 local repo_url=$2
5200 local variable_name=$3
5201 prev_version=${!variable_name}
5202
5203 output=$(curl -s "$repo_url");
5204 if echo "$output" | jq -e 'type == "object"' > /dev/null; then
5205 message=$(echo "$output" | jq -r '.message')
5206
5207 if [[ $message == "API rate limit exceeded"* ]]; then
5208 echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
5209 echo -e "\nAttempting to find latest version using GitHub tags."
5210 find_prev_version_from_git_tags prev_version "$url" "tags/v"
5211 declare -g ${variable_name}="${prev_version}"
5212 fi
5213 elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
5214 echo -e "\nAttempting to find latest version using GitHub Api."
5215 version=$(echo "$output" | jq -r '.[1].tag_name')
5216 declare -g ${variable_name}="${version#v}"
5217 fi
5218 echo "${variable_name}=${!variable_name}"
5219 }
5220
5221 get_github_api_repo_url() {
5222 local url=$1
5223 echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
5224 }
5225
5226 ###########################################
5227 # Start docker-in-docker installation
5228 ###########################################
5229
5230 # Ensure apt is in non-interactive to avoid prompts
5231 export DEBIAN_FRONTEND=noninteractive
5232
5233 # Source /etc/os-release to get OS info
5234 . /etc/os-release
5235
5236 # Determine adjusted ID and package manager
5237 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5238 ADJUSTED_ID="debian"
5239 PKG_MGR_CMD="apt-get"
5240 # Use dpkg for Debian-based systems
5241 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
5242 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
5243 ADJUSTED_ID="rhel"
5244 # Determine the appropriate package manager for RHEL-based systems
5245 for pkg_mgr in tdnf dnf microdnf yum; do
5246 if command -v "$pkg_mgr" >/dev/null 2>&1; then
5247 PKG_MGR_CMD="$pkg_mgr"
5248 break
5249 fi
5250 done
5251
5252 if [ -z "${PKG_MGR_CMD}" ]; then
5253 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
5254 exit 1
5255 fi
5256
5257 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
5258 else
5259 err "Linux distro ${ID} not supported."
5260 exit 1
5261 fi
5262
5263 # Azure Linux specific setup
5264 if [ "${ID}" = "azurelinux" ]; then
5265 VERSION_CODENAME="azurelinux${VERSION_ID}"
5266 fi
5267
5268 # Prevent attempting to install Moby on Debian trixie (packages removed)
5269 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5270 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5271 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5272 exit 1
5273 fi
5274
5275 # Check if distro is supported
5276 if [ "${USE_MOBY}" = "true" ]; then
5277 if [ "${ADJUSTED_ID}" = "debian" ]; then
5278 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5279 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5280 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5281 exit 1
5282 fi
5283 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5284 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5285 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5286 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5287 else
5288 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5289 fi
5290 fi
5291 else
5292 if [ "${ADJUSTED_ID}" = "debian" ]; then
5293 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5294 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5295 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5296 exit 1
5297 fi
5298 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5299 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5300
5301 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5302 fi
5303 fi
5304
5305 # Install base dependencies
5306 base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5307 case ${ADJUSTED_ID} in
5308 debian)
5309 check_packages apt-transport-https $base_packages dirmngr
5310 ;;
5311 rhel)
5312 check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
5313
5314 ;;
5315 esac
5316
5317 # Install git if not already present
5318 if ! command -v git >/dev/null 2>&1; then
5319 check_packages git
5320 fi
5321
5322 # Update CA certificates to ensure HTTPS connections work properly
5323 # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5324 # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5325 if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5326 update-ca-certificates
5327 fi
5328
5329 # Swap to legacy iptables for compatibility (Debian only)
5330 if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5331 update-alternatives --set iptables /usr/sbin/iptables-legacy
5332 update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5333 fi
5334
5335 # Set up the necessary repositories
5336 if [ "${USE_MOBY}" = "true" ]; then
5337 # Name of open source engine/cli
5338 engine_package_name="moby-engine"
5339 cli_package_name="moby-cli"
5340
5341 case ${ADJUSTED_ID} in
5342 debian)
5343 # Import key safely and import Microsoft apt repo
5344 {
5345 curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5346 curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5347 } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5348 echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5349 ;;
5350 rhel)
5351 echo "(*) ${ID} detected - checking for Moby packages..."
5352
5353 # Check if moby packages are available in default repos
5354 if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5355 echo "(*) Using built-in ${ID} Moby packages"
5356 else
5357 case "${ID}" in
5358 azurelinux)
5359 echo "(*) Moby packages not found in Azure Linux repositories"
5360 echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5361 err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5362 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5363 exit 1
5364 ;;
5365 mariner)
5366 echo "(*) Adding Microsoft repository for CBL-Mariner..."
5367 # Add Microsoft repository if packages aren't available locally
5368 curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5369 cat > /etc/yum.repos.d/microsoft.repo << EOF
5370 [microsoft]
5371 name=Microsoft Repository
5372 baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5373 enabled=1
5374 gpgcheck=1
5375 gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5376 EOF
5377 # Verify packages are available after adding repo
5378 pkg_mgr_update
5379 if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5380 echo "(*) Moby packages not found in Microsoft repository either"
5381 err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5382 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5383 exit 1
5384 fi
5385 ;;
5386 *)
5387 err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5388 exit 1
5389 ;;
5390 esac
5391 fi
5392 ;;
5393 esac
5394 else
5395 # Name of licensed engine/cli
5396 engine_package_name="docker-ce"
5397 cli_package_name="docker-ce-cli"
5398 case ${ADJUSTED_ID} in
5399 debian)
5400 curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5401 echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5402 ;;
5403 rhel)
5404 # Docker CE repository setup for RHEL-based systems
5405 setup_docker_ce_repo() {
5406 curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5407 cat > /etc/yum.repos.d/docker-ce.repo << EOF
5408 [docker-ce-stable]
5409 name=Docker CE Stable
5410 baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5411 enabled=1
5412 gpgcheck=1
5413 gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5414 skip_if_unavailable=1
5415 module_hotfixes=1
5416 EOF
5417 }
5418 install_azure_linux_deps() {
5419 echo "(*) Installing device-mapper libraries for Docker CE..."
5420 [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5421 echo "(*) Installing additional Docker CE dependencies..."
5422 ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5423 echo "(*) Some optional dependencies could not be installed, continuing..."
5424 }
5425 }
5426 setup_selinux_context() {
5427 if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5428 echo "(*) Creating minimal SELinux context for Docker compatibility..."
5429 mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5430 echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5431 fi
5432 }
5433
5434 # Special handling for RHEL Docker CE installation
5435 case "${ID}" in
5436 azurelinux|mariner)
5437 echo "(*) ${ID} detected"
5438 echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5439 echo "(*) Setting up Docker CE repository..."
5440
5441 setup_docker_ce_repo
5442 install_azure_linux_deps
5443
5444 if [ "${USE_MOBY}" != "true" ]; then
5445 echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5446 echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5447 setup_selinux_context
5448 else
5449 echo "(*) Using Moby - container-selinux not required"
5450 fi
5451 ;;
5452 *)
5453 # Standard RHEL/CentOS/Fedora approach
5454 if command -v dnf >/dev/null 2>&1; then
5455 dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5456 elif command -v yum-config-manager >/dev/null 2>&1; then
5457 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5458 else
5459 # Manual fallback
5460 setup_docker_ce_repo
5461 fi
5462 ;;
5463 esac
5464 ;;
5465 esac
5466 fi
5467
5468 # Refresh package database
5469 case ${ADJUSTED_ID} in
5470 debian)
5471 apt-get update
5472 ;;
5473 rhel)
5474 pkg_mgr_update
5475 ;;
5476 esac
5477
5478 # Soft version matching
5479 if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
5480 # Empty, meaning grab whatever "latest" is in apt repo
5481 engine_version_suffix=""
5482 cli_version_suffix=""
5483 else
5484 case ${ADJUSTED_ID} in
5485 debian)
5486 # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
5487 docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
5488 docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
5489 # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
5490 docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5491 set +e # Don't exit if finding version fails - will handle gracefully
5492 cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5493 engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5494 set -e
5495 if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
5496 err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5497 apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5498 exit 1
5499 fi
5500 ;;
5501 rhel)
5502 # For RHEL-based systems, use dnf/yum to find versions
5503 docker_version_escaped="${DOCKER_VERSION//./\\.}"
5504 set +e # Don't exit if finding version fails - will handle gracefully
5505 if [ "${USE_MOBY}" = "true" ]; then
5506 available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5507 else
5508 available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5509 fi
5510 set -e
5511 if [ -n "${available_versions}" ]; then
5512 engine_version_suffix="-${available_versions}"
5513 cli_version_suffix="-${available_versions}"
5514 else
5515 echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
5516 engine_version_suffix=""
5517 cli_version_suffix=""
5518 fi
5519 ;;
5520 esac
5521 fi
5522
5523 # Version matching for moby-buildx
5524 if [ "${USE_MOBY}" = "true" ]; then
5525 if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
5526 # Empty, meaning grab whatever "latest" is in apt repo
5527 buildx_version_suffix=""
5528 else
5529 case ${ADJUSTED_ID} in
5530 debian)
5531 buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5532 buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
5533 buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5534 set +e
5535 buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
5536 set -e
5537 if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
5538 err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5539 apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5540 exit 1
5541 fi
5542 ;;
5543 rhel)
5544 # For RHEL-based systems, try to find buildx version or use latest
5545 buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5546 set +e
5547 available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
5548 set -e
5549 if [ -n "${available_buildx}" ]; then
5550 buildx_version_suffix="-${available_buildx}"
5551 else
5552 echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
5553 buildx_version_suffix=""
5554 fi
5555 ;;
5556 esac
5557 echo "buildx_version_suffix ${buildx_version_suffix}"
5558 fi
5559 fi
5560
5561 # Install Docker / Moby CLI if not already installed
5562 if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5563 echo "Docker / Moby CLI and Engine already installed."
5564 else
5565 case ${ADJUSTED_ID} in
5566 debian)
5567 if [ "${USE_MOBY}" = "true" ]; then
5568 # Install engine
5569 set +e # Handle error gracefully
5570 apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5571 exit_code=$?
5572 set -e
5573
5574 if [ ${exit_code} -ne 0 ]; then
5575 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5576 exit 1
5577 fi
5578
5579 # Install compose
5580 apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5581 else
5582 apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5583 # Install compose
5584 apt-mark hold docker-ce docker-ce-cli
5585 apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5586 fi
5587 ;;
5588 rhel)
5589 if [ "${USE_MOBY}" = "true" ]; then
5590 set +e # Handle error gracefully
5591 ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5592 exit_code=$?
5593 set -e
5594
5595 if [ ${exit_code} -ne 0 ]; then
5596 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5597 exit 1
5598 fi
5599
5600 # Install compose
5601 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5602 ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5603 fi
5604 else
5605 # Special handling for Azure Linux Docker CE installation
5606 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5607 echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5608
5609 # Use rpm with --force and --nodeps for Azure Linux
5610 set +e # Don't exit on error for this section
5611 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5612 install_result=$?
5613 set -e
5614
5615 if [ $install_result -ne 0 ]; then
5616 echo "(*) Standard installation failed, trying manual installation..."
5617
5618 echo "(*) Standard installation failed, trying manual installation..."
5619
5620 # Create directory for downloading packages
5621 mkdir -p /tmp/docker-ce-install
5622
5623 # Download packages manually using curl since tdnf doesn't support download
5624 echo "(*) Downloading Docker CE packages manually..."
5625
5626 # Get the repository baseurl
5627 repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5628
5629 # Download packages directly
5630 cd /tmp/docker-ce-install
5631
5632 # Get package names with versions
5633 if [ -n "${cli_version_suffix}" ]; then
5634 docker_ce_version="${cli_version_suffix#-}"
5635 docker_cli_version="${engine_version_suffix#-}"
5636 else
5637 # Get latest version from repository
5638 docker_ce_version="latest"
5639 fi
5640
5641 echo "(*) Attempting to download Docker CE packages from repository..."
5642
5643 # Try to download latest packages if specific version fails
5644 if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5645 # Fallback: try to get latest available version
5646 echo "(*) Specific version not found, trying latest..."
5647 latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5648 latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5649 latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5650
5651 if [ -n "${latest_docker}" ]; then
5652 curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5653 curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5654 curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5655 else
5656 echo "(*) ERROR: Could not find Docker CE packages in repository"
5657 echo "(*) Please check repository configuration or use 'moby': true"
5658 exit 1
5659 fi
5660 fi
5661 # Install systemd libraries required by Docker CE
5662 echo "(*) Installing systemd libraries required by Docker CE..."
5663 ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5664 echo "(*) WARNING: Could not install systemd libraries"
5665 echo "(*) Docker may fail to start without these"
5666 }
5667
5668 # Install with rpm --force --nodeps
5669 echo "(*) Installing Docker CE packages with dependency override..."
5670 rpm -Uvh --force --nodeps *.rpm
5671
5672 # Cleanup
5673 cd /
5674 rm -rf /tmp/docker-ce-install
5675
5676 echo "(*) Docker CE installation completed with dependency bypass"
5677 echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5678 fi
5679 else
5680 # Standard installation for other RHEL-based systems
5681 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5682 fi
5683 # Install compose
5684 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5685 ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5686 fi
5687 fi
5688 ;;
5689 esac
5690 fi
5691
5692 echo "Finished installing docker / moby!"
5693
5694 docker_home="/usr/libexec/docker"
5695 cli_plugins_dir="${docker_home}/cli-plugins"
5696
5697 # fallback for docker-compose
5698 fallback_compose(){
5699 local url=$1
5700 local repo_url=$(get_github_api_repo_url "$url")
5701 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5702 get_previous_version "${url}" "${repo_url}" compose_version
5703 echo -e "\nAttempting to install v${compose_version}"
5704 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5705 }
5706
5707 # If 'docker-compose' command is to be included
5708 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5709 case "${architecture}" in
5710 amd64|x86_64) target_compose_arch=x86_64 ;;
5711 arm64|aarch64) target_compose_arch=aarch64 ;;
5712 *)
5713 echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5714 exit 1
5715 esac
5716
5717 docker_compose_path="/usr/local/bin/docker-compose"
5718 if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5719 err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5720 INSTALL_DOCKER_COMPOSE_SWITCH="false"
5721
5722 if [ "${target_compose_arch}" = "x86_64" ]; then
5723 echo "(*) Installing docker compose v1..."
5724 curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5725 chmod +x ${docker_compose_path}
5726
5727 # Download the SHA256 checksum
5728 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5729 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5730 sha256sum -c docker-compose.sha256sum --ignore-missing
5731 elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5732 err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5733 exit 1
5734 else
5735 # Use pip to get a version that runs on this architecture
5736 check_packages python3-minimal python3-pip libffi-dev python3-venv
5737 echo "(*) Installing docker compose v1 via pip..."
5738 export PYTHONUSERBASE=/usr/local
5739 pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5740 fi
5741 else
5742 compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5743 docker_compose_url="https://github.com/docker/compose"
5744 find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5745 echo "(*) Installing docker-compose ${compose_version}..."
5746 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5747 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5748 fallback_compose "$docker_compose_url"
5749 }
5750
5751 chmod +x ${docker_compose_path}
5752
5753 # Download the SHA256 checksum
5754 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5755 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5756 sha256sum -c docker-compose.sha256sum --ignore-missing
5757
5758 mkdir -p ${cli_plugins_dir}
5759 cp ${docker_compose_path} ${cli_plugins_dir}
5760 fi
5761 fi
5762
5763 # fallback method for compose-switch
5764 fallback_compose-switch() {
5765 local url=$1
5766 local repo_url=$(get_github_api_repo_url "$url")
5767 echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5768 get_previous_version "$url" "$repo_url" compose_switch_version
5769 echo -e "\nAttempting to install v${compose_switch_version}"
5770 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5771 }
5772 # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5773 if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5774 if type docker-compose > /dev/null 2>&1; then
5775 echo "(*) Installing compose-switch..."
5776 current_compose_path="$(command -v docker-compose)"
5777 target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5778 compose_switch_version="latest"
5779 compose_switch_url="https://github.com/docker/compose-switch"
5780 # Try to get latest version, fallback to known stable version if GitHub API fails
5781 set +e
5782 find_version_from_git_tags compose_switch_version "$compose_switch_url"
5783 if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5784 echo "(*) GitHub API rate limited or failed, using fallback method"
5785 fallback_compose-switch "$compose_switch_url"
5786 fi
5787 set -e
5788
5789 # Map architecture for compose-switch downloads
5790 case "${architecture}" in
5791 amd64|x86_64) target_switch_arch=amd64 ;;
5792 arm64|aarch64) target_switch_arch=arm64 ;;
5793 *) target_switch_arch=${architecture} ;;
5794 esac
5795 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5796 chmod +x /usr/local/bin/compose-switch
5797 # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5798 # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5799 mv "${current_compose_path}" "${target_compose_path}"
5800 update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5801 update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5802 else
5803 err "Skipping installation of compose-switch as docker compose is unavailable..."
5804 fi
5805 fi
5806
5807 # If init file already exists, exit
5808 if [ -f "/usr/local/share/docker-init.sh" ]; then
5809 echo "/usr/local/share/docker-init.sh already exists, so exiting."
5810 # Clean up
5811 rm -rf /var/lib/apt/lists/*
5812 exit 0
5813 fi
5814 echo "docker-init doesn't exist, adding..."
5815
5816 if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5817 groupadd -r docker
5818 fi
5819
5820 usermod -aG docker ${USERNAME}
5821
5822 # fallback for docker/buildx
5823 fallback_buildx() {
5824 local url=$1
5825 local repo_url=$(get_github_api_repo_url "$url")
5826 echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5827 get_previous_version "$url" "$repo_url" buildx_version
5828 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5829 echo -e "\nAttempting to install v${buildx_version}"
5830 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5831 }
5832
5833 if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5834 buildx_version="latest"
5835 docker_buildx_url="https://github.com/docker/buildx"
5836 find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5837 echo "(*) Installing buildx ${buildx_version}..."
5838
5839 # Map architecture for buildx downloads
5840 case "${architecture}" in
5841 amd64|x86_64) target_buildx_arch=amd64 ;;
5842 arm64|aarch64) target_buildx_arch=arm64 ;;
5843 *) target_buildx_arch=${architecture} ;;
5844 esac
5845
5846 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5847
5848 cd /tmp
5849 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5850
5851 docker_home="/usr/libexec/docker"
5852 cli_plugins_dir="${docker_home}/cli-plugins"
5853
5854 mkdir -p ${cli_plugins_dir}
5855 mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5856 chmod +x ${cli_plugins_dir}/docker-buildx
5857
5858 chown -R "${USERNAME}:docker" "${docker_home}"
5859 chmod -R g+r+w "${docker_home}"
5860 find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5861 fi
5862
5863 DOCKER_DEFAULT_IP6_TABLES=""
5864 if [ "$DISABLE_IP6_TABLES" == true ]; then
5865 requested_version=""
5866 # checking whether the version requested either is in semver format or just a number denoting the major version
5867 # and, extracting the major version number out of the two scenarios
5868 semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5869 if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5870 requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5871 elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5872 requested_version=$DOCKER_VERSION
5873 fi
5874 if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5875 DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5876 echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5877 fi
5878 fi
5879
5880 if [ ! -d /usr/local/share ]; then
5881 mkdir -p /usr/local/share
5882 fi
5883
5884 tee /usr/local/share/docker-init.sh > /dev/null \
5885 << EOF
5886 #!/bin/sh
5887 #-------------------------------------------------------------------------------------------------------------
5888 # Copyright (c) Microsoft Corporation. All rights reserved.
5889 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5890 #-------------------------------------------------------------------------------------------------------------
5891
5892 set -e
5893
5894 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5895 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5896 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5897 EOF
5898
5899 tee -a /usr/local/share/docker-init.sh > /dev/null \
5900 << 'EOF'
5901 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5902 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5903 find /run /var/run -iname 'docker*.pid' -delete || :
5904 find /run /var/run -iname 'container*.pid' -delete || :
5905
5906 # -- Start: dind wrapper script --
5907 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5908
5909 export container=docker
5910
5911 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5912 mount -t securityfs none /sys/kernel/security || {
5913 echo >&2 'Could not mount /sys/kernel/security.'
5914 echo >&2 'AppArmor detection and --privileged mode might break.'
5915 }
5916 fi
5917
5918 # Mount /tmp (conditionally)
5919 if ! mountpoint -q /tmp; then
5920 mount -t tmpfs none /tmp
5921 fi
5922
5923 set_cgroup_nesting()
5924 {
5925 # cgroup v2: enable nesting
5926 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5927 # move the processes from the root group to the /init group,
5928 # otherwise writing subtree_control fails with EBUSY.
5929 # An error during moving non-existent process (i.e., "cat") is ignored.
5930 mkdir -p /sys/fs/cgroup/init
5931 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5932 # enable controllers
5933 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5934 > /sys/fs/cgroup/cgroup.subtree_control
5935 fi
5936 }
5937
5938 # Set cgroup nesting, retrying if necessary
5939 retry_cgroup_nesting=0
5940
5941 until [ "${retry_cgroup_nesting}" -eq "5" ];
5942 do
5943 set +e
5944 set_cgroup_nesting
5945
5946 if [ $? -ne 0 ]; then
5947 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5948 else
5949 break
5950 fi
5951
5952 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5953 set -e
5954 done
5955
5956 # -- End: dind wrapper script --
5957
5958 # Handle DNS
5959 set +e
5960 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5961 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5962 then
5963 echo "Setting dockerd Azure DNS."
5964 CUSTOMDNS="--dns 168.63.129.16"
5965 else
5966 echo "Not setting dockerd DNS manually."
5967 CUSTOMDNS=""
5968 fi
5969 set -e
5970
5971 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5972 then
5973 DEFAULT_ADDRESS_POOL=""
5974 else
5975 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5976 fi
5977
5978 # Start docker/moby engine
5979 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5980 INNEREOF
5981 )"
5982
5983 sudo_if() {
5984 COMMAND="$*"
5985
5986 if [ "$(id -u)" -ne 0 ]; then
5987 sudo $COMMAND
5988 else
5989 $COMMAND
5990 fi
5991 }
5992
5993 retry_docker_start_count=0
5994 docker_ok="false"
5995
5996 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
5997 do
5998 # Start using sudo if not invoked as root
5999 if [ "$(id -u)" -ne 0 ]; then
6000 sudo /bin/sh -c "${dockerd_start}"
6001 else
6002 eval "${dockerd_start}"
6003 fi
6004
6005 retry_count=0
6006 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6007 do
6008 sleep 1s
6009 set +e
6010 docker info > /dev/null 2>&1 && docker_ok="true"
6011 set -e
6012
6013 retry_count=`expr $retry_count + 1`
6014 done
6015
6016 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6017 echo "(*) Failed to start docker, retrying..."
6018 set +e
6019 sudo_if pkill dockerd
6020 sudo_if pkill containerd
6021 set -e
6022 fi
6023
6024 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6025 done
6026
6027 # Execute whatever commands were passed in (if any). This allows us
6028 # to set this script to ENTRYPOINT while still executing the default CMD.
6029 exec "$@"
6030 EOF
6031
6032 chmod +x /usr/local/share/docker-init.sh
6033 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6034
6035 # Clean up
6036 rm -rf /var/lib/apt/lists/*
6037
6038 echo 'docker-in-docker-debian script has completed!'"#),
6039 ]).await;
6040
6041 return Ok(http::Response::builder()
6042 .status(200)
6043 .body(AsyncBody::from(response))
6044 .unwrap());
6045 }
        // Mock OCI registry endpoint: serves the image manifest for the
        // `ghcr.io/devcontainers/features/go` feature at tag "1". The layer
        // digest below is what the blob-download branch later matches on, and
        // the `dev.containers.metadata` annotation carries the feature's
        // devcontainer-feature.json as an escaped JSON string — keep these
        // bytes exactly in sync with the tarball fixture served for the blob.
        if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
            let response = r#"
            {
                "schemaVersion": 2,
                "mediaType": "application/vnd.oci.image.manifest.v1+json",
                "config": {
                    "mediaType": "application/vnd.devcontainers",
                    "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                    "size": 2
                },
                "layers": [
                    {
                        "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                        "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
                        "size": 20992,
                        "annotations": {
                            "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
                        }
                    }
                ],
                "annotations": {
                    "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                    "com.github.package.type": "devcontainer_feature"
                }
            }
            "#;

            // Return the manifest verbatim with a 200; the client under test
            // parses this body as an OCI manifest to find the layer digest.
            return Ok(http::Response::builder()
                .status(200)
                .body(http_client::AsyncBody::from(response))
                .unwrap());
        }
6078 if parts.uri.path()
6079 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6080 {
6081 let response = build_tarball(vec![
6082 ("./devcontainer-feature.json", r#"
6083 {
6084 "id": "go",
6085 "version": "1.3.3",
6086 "name": "Go",
6087 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6088 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6089 "options": {
6090 "version": {
6091 "type": "string",
6092 "proposals": [
6093 "latest",
6094 "none",
6095 "1.24",
6096 "1.23"
6097 ],
6098 "default": "latest",
6099 "description": "Select or enter a Go version to install"
6100 },
6101 "golangciLintVersion": {
6102 "type": "string",
6103 "default": "latest",
6104 "description": "Version of golangci-lint to install"
6105 }
6106 },
6107 "init": true,
6108 "customizations": {
6109 "vscode": {
6110 "extensions": [
6111 "golang.Go"
6112 ],
6113 "settings": {
6114 "github.copilot.chat.codeGeneration.instructions": [
6115 {
6116 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6117 }
6118 ]
6119 }
6120 }
6121 },
6122 "containerEnv": {
6123 "GOROOT": "/usr/local/go",
6124 "GOPATH": "/go",
6125 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6126 },
6127 "capAdd": [
6128 "SYS_PTRACE"
6129 ],
6130 "securityOpt": [
6131 "seccomp=unconfined"
6132 ],
6133 "installsAfter": [
6134 "ghcr.io/devcontainers/features/common-utils"
6135 ]
6136 }
6137 "#),
6138 ("./install.sh", r#"
6139 #!/usr/bin/env bash
6140 #-------------------------------------------------------------------------------------------------------------
6141 # Copyright (c) Microsoft Corporation. All rights reserved.
6142 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6143 #-------------------------------------------------------------------------------------------------------------
6144 #
6145 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6146 # Maintainer: The VS Code and Codespaces Teams
6147
6148 TARGET_GO_VERSION="${VERSION:-"latest"}"
6149 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6150
6151 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6152 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6153 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6154 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6155
6156 # https://www.google.com/linuxrepositories/
6157 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6158
6159 set -e
6160
6161 if [ "$(id -u)" -ne 0 ]; then
6162 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6163 exit 1
6164 fi
6165
6166 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6167 . /etc/os-release
6168 # Get an adjusted ID independent of distro variants
6169 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6170 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6171 ADJUSTED_ID="debian"
6172 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6173 ADJUSTED_ID="rhel"
6174 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6175 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6176 else
6177 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6178 fi
6179 else
6180 echo "Linux distro ${ID} not supported."
6181 exit 1
6182 fi
6183
6184 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6185 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6186 # Update the repo files to reference vault.centos.org.
6187 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6188 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6189 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6190 fi
6191
6192 # Setup INSTALL_CMD & PKG_MGR_CMD
6193 if type apt-get > /dev/null 2>&1; then
6194 PKG_MGR_CMD=apt-get
6195 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6196 elif type microdnf > /dev/null 2>&1; then
6197 PKG_MGR_CMD=microdnf
6198 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6199 elif type dnf > /dev/null 2>&1; then
6200 PKG_MGR_CMD=dnf
6201 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6202 else
6203 PKG_MGR_CMD=yum
6204 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6205 fi
6206
6207 # Clean up
6208 clean_up() {
6209 case ${ADJUSTED_ID} in
6210 debian)
6211 rm -rf /var/lib/apt/lists/*
6212 ;;
6213 rhel)
6214 rm -rf /var/cache/dnf/* /var/cache/yum/*
6215 rm -rf /tmp/yum.log
6216 rm -rf ${GPG_INSTALL_PATH}
6217 ;;
6218 esac
6219 }
6220 clean_up
6221
6222
6223 # Figure out correct version of a three part version number is not passed
6224 find_version_from_git_tags() {
6225 local variable_name=$1
6226 local requested_version=${!variable_name}
6227 if [ "${requested_version}" = "none" ]; then return; fi
6228 local repository=$2
6229 local prefix=${3:-"tags/v"}
6230 local separator=${4:-"."}
6231 local last_part_optional=${5:-"false"}
6232 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6233 local escaped_separator=${separator//./\\.}
6234 local last_part
6235 if [ "${last_part_optional}" = "true" ]; then
6236 last_part="(${escaped_separator}[0-9]+)?"
6237 else
6238 last_part="${escaped_separator}[0-9]+"
6239 fi
6240 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6241 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6242 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6243 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6244 else
6245 set +e
6246 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6247 set -e
6248 fi
6249 fi
6250 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6251 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6252 exit 1
6253 fi
6254 echo "${variable_name}=${!variable_name}"
6255 }
6256
6257 pkg_mgr_update() {
6258 case $ADJUSTED_ID in
6259 debian)
6260 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6261 echo "Running apt-get update..."
6262 ${PKG_MGR_CMD} update -y
6263 fi
6264 ;;
6265 rhel)
6266 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6267 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6268 echo "Running ${PKG_MGR_CMD} makecache ..."
6269 ${PKG_MGR_CMD} makecache
6270 fi
6271 else
6272 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6273 echo "Running ${PKG_MGR_CMD} check-update ..."
6274 set +e
6275 ${PKG_MGR_CMD} check-update
6276 rc=$?
6277 if [ $rc != 0 ] && [ $rc != 100 ]; then
6278 exit 1
6279 fi
6280 set -e
6281 fi
6282 fi
6283 ;;
6284 esac
6285 }
6286
6287 # Checks if packages are installed and installs them if not
6288 check_packages() {
6289 case ${ADJUSTED_ID} in
6290 debian)
6291 if ! dpkg -s "$@" > /dev/null 2>&1; then
6292 pkg_mgr_update
6293 ${INSTALL_CMD} "$@"
6294 fi
6295 ;;
6296 rhel)
6297 if ! rpm -q "$@" > /dev/null 2>&1; then
6298 pkg_mgr_update
6299 ${INSTALL_CMD} "$@"
6300 fi
6301 ;;
6302 esac
6303 }
6304
6305 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6306 rm -f /etc/profile.d/00-restore-env.sh
6307 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6308 chmod +x /etc/profile.d/00-restore-env.sh
6309
6310 # Some distributions do not install awk by default (e.g. Mariner)
6311 if ! type awk >/dev/null 2>&1; then
6312 check_packages awk
6313 fi
6314
6315 # Determine the appropriate non-root user
6316 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6317 USERNAME=""
6318 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6319 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6320 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6321 USERNAME=${CURRENT_USER}
6322 break
6323 fi
6324 done
6325 if [ "${USERNAME}" = "" ]; then
6326 USERNAME=root
6327 fi
6328 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6329 USERNAME=root
6330 fi
6331
6332 export DEBIAN_FRONTEND=noninteractive
6333
6334 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6335
6336 if [ $ADJUSTED_ID = "debian" ]; then
6337 check_packages g++ libc6-dev
6338 else
6339 check_packages gcc-c++ glibc-devel
6340 fi
6341 # Install curl, git, other dependencies if missing
6342 if ! type curl > /dev/null 2>&1; then
6343 check_packages curl
6344 fi
6345 if ! type git > /dev/null 2>&1; then
6346 check_packages git
6347 fi
6348 # Some systems, e.g. Mariner, still a few more packages
6349 if ! type as > /dev/null 2>&1; then
6350 check_packages binutils
6351 fi
6352 if ! [ -f /usr/include/linux/errno.h ]; then
6353 check_packages kernel-headers
6354 fi
6355 # Minimal RHEL install may need findutils installed
6356 if ! [ -f /usr/bin/find ]; then
6357 check_packages findutils
6358 fi
6359
6360 # Get closest match for version number specified
6361 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6362
6363 architecture="$(uname -m)"
6364 case $architecture in
6365 x86_64) architecture="amd64";;
6366 aarch64 | armv8*) architecture="arm64";;
6367 aarch32 | armv7* | armvhf*) architecture="armv6l";;
6368 i?86) architecture="386";;
6369 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6370 esac
6371
6372 # Install Go
6373 umask 0002
6374 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6375 groupadd -r golang
6376 fi
6377 usermod -a -G golang "${USERNAME}"
6378 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6379
6380 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6381 # Use a temporary location for gpg keys to avoid polluting image
6382 export GNUPGHOME="/tmp/tmp-gnupg"
6383 mkdir -p ${GNUPGHOME}
6384 chmod 700 ${GNUPGHOME}
6385 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6386 gpg -q --import /tmp/tmp-gnupg/golang_key
6387 echo "Downloading Go ${TARGET_GO_VERSION}..."
6388 set +e
6389 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6390 exit_code=$?
6391 set -e
6392 if [ "$exit_code" != "0" ]; then
6393 echo "(!) Download failed."
6394 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6395 set +e
6396 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6397 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6398 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6399 # Handle Go's odd version pattern where "0" releases omit the last part
6400 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6401 ((minor=minor-1))
6402 TARGET_GO_VERSION="${major}.${minor}"
6403 # Look for latest version from previous minor release
6404 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6405 else
6406 ((breakfix=breakfix-1))
6407 if [ "${breakfix}" = "0" ]; then
6408 TARGET_GO_VERSION="${major}.${minor}"
6409 else
6410 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6411 fi
6412 fi
6413 set -e
6414 echo "Trying ${TARGET_GO_VERSION}..."
6415 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6416 fi
6417 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6418 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6419 echo "Extracting Go ${TARGET_GO_VERSION}..."
6420 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6421 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6422 else
6423 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6424 fi
6425
6426 # Install Go tools that are isImportant && !replacedByGopls based on
6427 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6428 GO_TOOLS="\
6429 golang.org/x/tools/gopls@latest \
6430 honnef.co/go/tools/cmd/staticcheck@latest \
6431 golang.org/x/lint/golint@latest \
6432 github.com/mgechev/revive@latest \
6433 github.com/go-delve/delve/cmd/dlv@latest \
6434 github.com/fatih/gomodifytags@latest \
6435 github.com/haya14busa/goplay/cmd/goplay@latest \
6436 github.com/cweill/gotests/gotests@latest \
6437 github.com/josharian/impl@latest"
6438
6439 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6440 echo "Installing common Go tools..."
6441 export PATH=${TARGET_GOROOT}/bin:${PATH}
6442 export GOPATH=/tmp/gotools
6443 export GOCACHE="${GOPATH}/cache"
6444
6445 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6446 cd "${GOPATH}"
6447
6448 # Use go get for versions of go under 1.16
6449 go_install_command=install
6450 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6451 export GO111MODULE=on
6452 go_install_command=get
6453 echo "Go version < 1.16, using go get."
6454 fi
6455
6456 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6457
6458 # Move Go tools into path
6459 if [ -d "${GOPATH}/bin" ]; then
6460 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6461 fi
6462
6463 # Install golangci-lint from precompiled binaries
6464 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6465 echo "Installing golangci-lint latest..."
6466 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6467 sh -s -- -b "${TARGET_GOPATH}/bin"
6468 else
6469 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6470 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6471 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6472 fi
6473
6474 # Remove Go tools temp directory
6475 rm -rf "${GOPATH}"
6476 fi
6477
6478
6479 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6480 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6481 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6482 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6483
6484 # Clean up
6485 clean_up
6486
6487 echo "Done!"
6488 "#),
6489 ])
6490 .await;
6491 return Ok(http::Response::builder()
6492 .status(200)
6493 .body(AsyncBody::from(response))
6494 .unwrap());
6495 }
6496 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
6497 let response = r#"
6498 {
6499 "schemaVersion": 2,
6500 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6501 "config": {
6502 "mediaType": "application/vnd.devcontainers",
6503 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6504 "size": 2
6505 },
6506 "layers": [
6507 {
6508 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6509 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
6510 "size": 19968,
6511 "annotations": {
6512 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
6513 }
6514 }
6515 ],
6516 "annotations": {
6517 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6518 "com.github.package.type": "devcontainer_feature"
6519 }
6520 }"#;
6521 return Ok(http::Response::builder()
6522 .status(200)
6523 .body(AsyncBody::from(response))
6524 .unwrap());
6525 }
6526 if parts.uri.path()
6527 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
6528 {
6529 let response = build_tarball(vec![
6530 (
6531 "./devcontainer-feature.json",
6532 r#"
6533{
6534 "id": "aws-cli",
6535 "version": "1.1.3",
6536 "name": "AWS CLI",
6537 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
6538 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
6539 "options": {
6540 "version": {
6541 "type": "string",
6542 "proposals": [
6543 "latest"
6544 ],
6545 "default": "latest",
6546 "description": "Select or enter an AWS CLI version."
6547 },
6548 "verbose": {
6549 "type": "boolean",
6550 "default": true,
6551 "description": "Suppress verbose output."
6552 }
6553 },
6554 "customizations": {
6555 "vscode": {
6556 "extensions": [
6557 "AmazonWebServices.aws-toolkit-vscode"
6558 ],
6559 "settings": {
6560 "github.copilot.chat.codeGeneration.instructions": [
6561 {
6562 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
6563 }
6564 ]
6565 }
6566 }
6567 },
6568 "installsAfter": [
6569 "ghcr.io/devcontainers/features/common-utils"
6570 ]
6571}
6572 "#,
6573 ),
6574 (
6575 "./install.sh",
6576 r#"#!/usr/bin/env bash
6577 #-------------------------------------------------------------------------------------------------------------
6578 # Copyright (c) Microsoft Corporation. All rights reserved.
6579 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6580 #-------------------------------------------------------------------------------------------------------------
6581 #
6582 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
6583 # Maintainer: The VS Code and Codespaces Teams
6584
6585 set -e
6586
6587 # Clean up
6588 rm -rf /var/lib/apt/lists/*
6589
6590 VERSION=${VERSION:-"latest"}
6591 VERBOSE=${VERBOSE:-"true"}
6592
6593 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
6594 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
6595
6596 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
6597 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
6598 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
6599 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
6600 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
6601 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
6602 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
6603 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
6604 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
6605 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
6606 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
6607 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
6608 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
6609 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
6610 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
6611 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
6612 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
6613 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
6614 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
6615 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
6616 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
6617 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
6618 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
6619 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
6620 YLZATHZKTJyiqA==
6621 =vYOk
6622 -----END PGP PUBLIC KEY BLOCK-----"
6623
6624 if [ "$(id -u)" -ne 0 ]; then
6625 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6626 exit 1
6627 fi
6628
6629 apt_get_update()
6630 {
6631 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6632 echo "Running apt-get update..."
6633 apt-get update -y
6634 fi
6635 }
6636
6637 # Checks if packages are installed and installs them if not
6638 check_packages() {
6639 if ! dpkg -s "$@" > /dev/null 2>&1; then
6640 apt_get_update
6641 apt-get -y install --no-install-recommends "$@"
6642 fi
6643 }
6644
6645 export DEBIAN_FRONTEND=noninteractive
6646
6647 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
6648
6649 verify_aws_cli_gpg_signature() {
6650 local filePath=$1
6651 local sigFilePath=$2
6652 local awsGpgKeyring=aws-cli-public-key.gpg
6653
6654 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
6655 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
6656 local status=$?
6657
6658 rm "./${awsGpgKeyring}"
6659
6660 return ${status}
6661 }
6662
6663 install() {
6664 local scriptZipFile=awscli.zip
6665 local scriptSigFile=awscli.sig
6666
6667 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
6668 if [ "${VERSION}" != "latest" ]; then
6669 local versionStr=-${VERSION}
6670 fi
6671 architecture=$(dpkg --print-architecture)
6672 case "${architecture}" in
6673 amd64) architectureStr=x86_64 ;;
6674 arm64) architectureStr=aarch64 ;;
6675 *)
6676 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
6677 exit 1
6678 esac
6679 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
6680 curl "${scriptUrl}" -o "${scriptZipFile}"
6681 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
6682
6683 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
6684 if (( $? > 0 )); then
6685 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
6686 exit 1
6687 fi
6688
6689 if [ "${VERBOSE}" = "false" ]; then
6690 unzip -q "${scriptZipFile}"
6691 else
6692 unzip "${scriptZipFile}"
6693 fi
6694
6695 ./aws/install
6696
6697 # kubectl bash completion
6698 mkdir -p /etc/bash_completion.d
6699 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
6700
6701 # kubectl zsh completion
6702 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
6703 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
6704 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
6705 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
6706 fi
6707
6708 rm -rf ./aws
6709 }
6710
6711 echo "(*) Installing AWS CLI..."
6712
6713 install
6714
6715 # Clean up
6716 rm -rf /var/lib/apt/lists/*
6717
6718 echo "Done!""#,
6719 ),
6720 ("./scripts/", r#""#),
6721 (
6722 "./scripts/fetch-latest-completer-scripts.sh",
6723 r#"
6724 #!/bin/bash
6725 #-------------------------------------------------------------------------------------------------------------
6726 # Copyright (c) Microsoft Corporation. All rights reserved.
6727 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6728 #-------------------------------------------------------------------------------------------------------------
6729 #
6730 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
6731 # Maintainer: The Dev Container spec maintainers
6732 #
6733 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
6734 #
6735 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
6736 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
6737 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
6738
6739 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
6740 chmod +x "$BASH_COMPLETER_SCRIPT"
6741
6742 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
6743 chmod +x "$ZSH_COMPLETER_SCRIPT"
6744 "#,
6745 ),
6746 ("./scripts/vendor/", r#""#),
6747 (
6748 "./scripts/vendor/aws_bash_completer",
6749 r#"
6750 # Typically that would be added under one of the following paths:
6751 # - /etc/bash_completion.d
6752 # - /usr/local/etc/bash_completion.d
6753 # - /usr/share/bash-completion/completions
6754
6755 complete -C aws_completer aws
6756 "#,
6757 ),
6758 (
6759 "./scripts/vendor/aws_zsh_completer.sh",
6760 r#"
6761 # Source this file to activate auto completion for zsh using the bash
6762 # compatibility helper. Make sure to run `compinit` before, which should be
6763 # given usually.
6764 #
6765 # % source /path/to/zsh_complete.sh
6766 #
6767 # Typically that would be called somewhere in your .zshrc.
6768 #
6769 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
6770 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6771 #
6772 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
6773 #
6774 # zsh releases prior to that version do not export the required env variables!
6775
6776 autoload -Uz bashcompinit
6777 bashcompinit -i
6778
6779 _bash_complete() {
6780 local ret=1
6781 local -a suf matches
6782 local -x COMP_POINT COMP_CWORD
6783 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
6784 local -x COMP_LINE="$words"
6785 local -A savejobstates savejobtexts
6786
6787 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
6788 (( COMP_CWORD = CURRENT - 1))
6789 COMP_WORDS=( $words )
6790 BASH_VERSINFO=( 2 05b 0 1 release )
6791
6792 savejobstates=( ${(kv)jobstates} )
6793 savejobtexts=( ${(kv)jobtexts} )
6794
6795 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
6796
6797 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
6798
6799 if [[ -n $matches ]]; then
6800 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
6801 compset -P '*/' && matches=( ${matches##*/} )
6802 compset -S '/*' && matches=( ${matches%%/*} )
6803 compadd -Q -f "${suf[@]}" -a matches && ret=0
6804 else
6805 compadd -Q "${suf[@]}" -a matches && ret=0
6806 fi
6807 fi
6808
6809 if (( ret )); then
6810 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
6811 _default "${suf[@]}" && ret=0
6812 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
6813 _directories "${suf[@]}" && ret=0
6814 fi
6815 fi
6816
6817 return ret
6818 }
6819
6820 complete -C aws_completer aws
6821 "#,
6822 ),
6823 ]).await;
6824
6825 return Ok(http::Response::builder()
6826 .status(200)
6827 .body(AsyncBody::from(response))
6828 .unwrap());
6829 }
6830
6831 Ok(http::Response::builder()
6832 .status(404)
6833 .body(http_client::AsyncBody::default())
6834 .unwrap())
6835 })
6836 }
6837}