1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use regex::Regex;
10
11use fs::Fs;
12use http_client::HttpClient;
13use util::{ResultExt, command::Command, normalize_path};
14
15use crate::{
16 DevContainerConfig, DevContainerContext,
17 command_json::{CommandRunner, DefaultCommandRunner},
18 devcontainer_api::{DevContainerError, DevContainerUp},
19 devcontainer_json::{
20 ContainerBuild, DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort,
21 MountDefinition, deserialize_devcontainer_json, deserialize_devcontainer_json_from_value,
22 deserialize_devcontainer_json_to_value,
23 },
24 docker::{
25 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
26 DockerComposeServicePort, DockerComposeVolume, DockerInspect, DockerPs,
27 },
28 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
29 get_oci_token,
30 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
31 safe_id_lower,
32};
33
/// Tracks how far the devcontainer config has been processed: either freshly
/// deserialized from JSON, or with non-remote `${...}` variables expanded.
enum ConfigStatus {
    /// Parsed straight from devcontainer.json; variables not yet substituted.
    Deserialized(DevContainer),
    /// Non-remote variables (localEnv, workspace folders, devcontainerId)
    /// have been substituted into all string values.
    VariableParsed(DevContainer),
}
38
/// A resolved docker-compose configuration together with the compose files it
/// was built from.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Absolute paths to every compose file, including generated override files.
    files: Vec<PathBuf>,
    // The parsed compose configuration returned by the docker client.
    config: DockerComposeConfig,
}
44
/// All state needed to build and launch a dev container for one project:
/// the parsed config, I/O handles, and artifacts accumulated while preparing
/// the build (downloaded features, generated Dockerfile info, base image).
struct DevContainerManifest {
    http_client: Arc<dyn HttpClient>,
    fs: Arc<dyn Fs>,
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // Original devcontainer.json text, kept for re-parsing after variable expansion.
    raw_config: String,
    // Current parse/expansion state of the config.
    config: ConfigStatus,
    // Host environment used for `${localEnv:...}` substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the config file (e.g. "devcontainer.json").
    file_name: String,
    // Inspected base image; populated by download_feature_and_dockerfile_resources.
    root_image: Option<DockerInspect>,
    // Paths/tags for the generated features build; populated alongside root_image.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
60const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces";
61impl DevContainerManifest {
62 async fn new(
63 context: &DevContainerContext,
64 environment: HashMap<String, String>,
65 docker_client: Arc<dyn DockerClient>,
66 command_runner: Arc<dyn CommandRunner>,
67 local_config: DevContainerConfig,
68 local_project_path: &Path,
69 ) -> Result<Self, DevContainerError> {
70 let config_path = local_project_path.join(local_config.config_path.clone());
71 log::debug!("parsing devcontainer json found in {:?}", &config_path);
72 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
73 log::error!("Unable to read devcontainer contents: {e}");
74 DevContainerError::DevContainerParseFailed
75 })?;
76
77 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
78
79 let devcontainer_directory = config_path.parent().ok_or_else(|| {
80 log::error!("Dev container file should be in a directory");
81 DevContainerError::NotInValidProject
82 })?;
83 let file_name = config_path
84 .file_name()
85 .and_then(|f| f.to_str())
86 .ok_or_else(|| {
87 log::error!("Dev container file has no file name, or is invalid unicode");
88 DevContainerError::DevContainerParseFailed
89 })?;
90
91 Ok(Self {
92 fs: context.fs.clone(),
93 http_client: context.http_client.clone(),
94 docker_client,
95 command_runner,
96 raw_config: devcontainer_contents,
97 config: ConfigStatus::Deserialized(devcontainer),
98 local_project_directory: local_project_path.to_path_buf(),
99 local_environment: environment,
100 config_directory: devcontainer_directory.to_path_buf(),
101 file_name: file_name.to_string(),
102 root_image: None,
103 features_build_info: None,
104 features: Vec::new(),
105 })
106 }
107
108 fn devcontainer_id(&self) -> String {
109 let mut labels = self.identifying_labels();
110 labels.sort_by_key(|(key, _)| *key);
111
112 let mut hasher = DefaultHasher::new();
113 for (key, value) in &labels {
114 key.hash(&mut hasher);
115 value.hash(&mut hasher);
116 }
117
118 format!("{:016x}", hasher.finish())
119 }
120
121 fn identifying_labels(&self) -> Vec<(&str, String)> {
122 let labels = vec![
123 (
124 "devcontainer.local_folder",
125 (self.local_project_directory.display()).to_string(),
126 ),
127 (
128 "devcontainer.config_file",
129 (self.config_file().display()).to_string(),
130 ),
131 ];
132 labels
133 }
134
    /// Expands all non-remote `${...}` variables in a devcontainer JSON
    /// document: `devcontainerId`, the four workspace-folder variables, and
    /// `${localEnv:VAR}` references. Returns the substituted JSON value.
    ///
    /// Only string leaves are rewritten; arrays and objects are walked
    /// iteratively (no recursion) via a worklist of mutable references.
    fn parse_nonremote_vars_for_content(
        &self,
        content: &str,
    ) -> Result<serde_json_lenient::Value, DevContainerError> {
        let mut value = deserialize_devcontainer_json_to_value(content)?;
        let mut to_visit = vec![&mut value];

        while let Some(value) = to_visit.pop() {
            use serde_json_lenient::Value;

            match value {
                Value::String(string) => {
                    // Container-side paths are normalized to forward slashes;
                    // basenames are substituted before the full-folder variables
                    // so the longer `${...Folder}` names are not clobbered.
                    *string = string
                        .replace("${devcontainerId}", &self.devcontainer_id())
                        .replace(
                            "${containerWorkspaceFolderBasename}",
                            &self.remote_workspace_base_name().unwrap_or_default(),
                        )
                        .replace(
                            "${localWorkspaceFolderBasename}",
                            &self.local_workspace_base_name()?,
                        )
                        .replace(
                            "${containerWorkspaceFolder}",
                            &self
                                .remote_workspace_folder()
                                .map(|path| path.display().to_string())
                                .unwrap_or_default()
                                .replace('\\', "/"),
                        )
                        .replace(
                            "${localWorkspaceFolder}",
                            &self.local_workspace_folder().replace('\\', "/"),
                        );
                    // `${localEnv:VAR}` / `${localEnv:VAR:default}` lookups
                    // against the host environment captured at construction.
                    *string = Self::replace_environment_variables(
                        string,
                        "localEnv",
                        &self.local_environment,
                    );
                }

                Value::Array(array) => to_visit.extend(array.iter_mut()),
                Value::Object(object) => to_visit.extend(object.values_mut()),

                Value::Null | Value::Bool(_) | Value::Number(_) => {}
            }
        }

        Ok(value)
    }
185
186 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
187 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
188 let parsed_config = deserialize_devcontainer_json_from_value(replaced_content)?;
189
190 self.config = ConfigStatus::VariableParsed(parsed_config);
191
192 Ok(())
193 }
194
195 fn runtime_remote_env(
196 &self,
197 container_env: &HashMap<String, String>,
198 ) -> Result<HashMap<String, String>, DevContainerError> {
199 let mut merged_remote_env = container_env.clone();
200 // HOME is user-specific, and we will often not run as the image user
201 merged_remote_env.remove("HOME");
202 if let Some(mut remote_env) = self.dev_container().remote_env.clone() {
203 remote_env.values_mut().for_each(|value| {
204 *value = Self::replace_environment_variables(value, "containerEnv", &container_env)
205 });
206 for (k, v) in remote_env {
207 merged_remote_env.insert(k, v);
208 }
209 }
210 Ok(merged_remote_env)
211 }
212
213 fn replace_environment_variables(
214 mut orig: &str,
215 environment_source: &str,
216 environment: &HashMap<String, String>,
217 ) -> String {
218 let mut replaced = String::with_capacity(orig.len());
219 let prefix = format!("${{{environment_source}:");
220 while let Some(start) = orig.find(&prefix) {
221 let var_name_start = start + prefix.len();
222 let Some(end) = orig[var_name_start..].find('}') else {
223 // No closing `}` => malformed variable reference => paste as is.
224 break;
225 };
226 let end = var_name_start + end;
227
228 let (var_name_end, default_start) =
229 if let Some(var_name_end) = orig[var_name_start..end].find(':') {
230 let var_name_end = var_name_start + var_name_end;
231 (var_name_end, var_name_end + 1)
232 } else {
233 (end, end)
234 };
235
236 let var_name = &orig[var_name_start..var_name_end];
237 if var_name.is_empty() {
238 // Empty variable name => paste as is.
239 replaced.push_str(&orig[..end + 1]);
240 orig = &orig[end + 1..];
241 continue;
242 }
243 let default = &orig[default_start..end];
244
245 replaced.push_str(&orig[..start]);
246 replaced.push_str(
247 environment
248 .get(var_name)
249 .map(|value| value.as_str())
250 .unwrap_or(default),
251 );
252 orig = &orig[end + 1..];
253 }
254 replaced.push_str(orig);
255 replaced
256 }
257
258 fn config_file(&self) -> PathBuf {
259 self.config_directory.join(&self.file_name)
260 }
261
262 fn dev_container(&self) -> &DevContainer {
263 match &self.config {
264 ConfigStatus::Deserialized(dev_container) => dev_container,
265 ConfigStatus::VariableParsed(dev_container) => dev_container,
266 }
267 }
268
269 async fn dockerfile_location(&self) -> Option<PathBuf> {
270 let dev_container = self.dev_container();
271 match dev_container.build_type() {
272 DevContainerBuildType::Image(_) => None,
273 DevContainerBuildType::Dockerfile(build) => {
274 Some(self.config_directory.join(&build.dockerfile))
275 }
276 DevContainerBuildType::DockerCompose => {
277 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
278 return None;
279 };
280 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
281 else {
282 return None;
283 };
284 main_service.build.and_then(|b| {
285 let compose_file = docker_compose_manifest.files.first()?;
286 resolve_compose_dockerfile(
287 compose_file,
288 b.context.as_deref(),
289 b.dockerfile.as_deref()?,
290 )
291 })
292 }
293 DevContainerBuildType::None => None,
294 }
295 }
296
    /// Builds a deterministic image tag for the features build: a short prefix
    /// from the (sanitized) container name plus a hash of the Dockerfile path.
    fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
        let mut hasher = DefaultHasher::new();
        // Prefix from the sanitized container name, or a fixed fallback when
        // the config has no name.
        let prefix = match &self.dev_container().name {
            Some(name) => &safe_id_lower(name),
            None => "zed-dc",
        };
        // Byte-based cut to 6 chars; `get` yields None (keeping the full
        // prefix) when it is shorter or the cut lands mid-UTF-8.
        let prefix = prefix.get(..6).unwrap_or(prefix);

        dockerfile_build_path.hash(&mut hasher);

        let hash = hasher.finish();
        // NOTE(review): DefaultHasher output is not guaranteed stable across
        // Rust releases, so this tag can change after a toolchain upgrade.
        format!("{}-{:x}-features", prefix, hash)
    }
310
311 /// Gets the base image from the devcontainer with the following precedence:
312 /// - The devcontainer image if an image is specified
313 /// - The image sourced in the Dockerfile if a Dockerfile is specified
314 /// - The image sourced in the docker-compose main service, if one is specified
315 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
316 /// If no such image is available, return an error
317 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
318 match self.dev_container().build_type() {
319 DevContainerBuildType::Image(image) => {
320 return Ok(image);
321 }
322 DevContainerBuildType::Dockerfile(build) => {
323 let dockerfile_contents = self.expanded_dockerfile_content().await?;
324 return image_from_dockerfile(dockerfile_contents, &build.target).ok_or_else(
325 || {
326 log::error!("Unable to find base image in Dockerfile");
327 DevContainerError::DevContainerParseFailed
328 },
329 );
330 }
331 DevContainerBuildType::DockerCompose => {
332 let docker_compose_manifest = self.docker_compose_manifest().await?;
333 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
334
335 if let Some(_) = main_service
336 .build
337 .as_ref()
338 .and_then(|b| b.dockerfile.as_ref())
339 {
340 let dockerfile_contents = self.expanded_dockerfile_content().await?;
341 return image_from_dockerfile(
342 dockerfile_contents,
343 &main_service.build.as_ref().and_then(|b| b.target.clone()),
344 )
345 .ok_or_else(|| {
346 log::error!("Unable to find base image in Dockerfile");
347 DevContainerError::DevContainerParseFailed
348 });
349 }
350 if let Some(image) = &main_service.image {
351 return Ok(image.to_string());
352 }
353
354 log::error!("No valid base image found in docker-compose configuration");
355 return Err(DevContainerError::DevContainerParseFailed);
356 }
357 DevContainerBuildType::None => {
358 log::error!("Not a valid devcontainer config for build");
359 return Err(DevContainerError::NotInValidProject);
360 }
361 }
362 }
363
    /// Downloads every configured dev-container feature from its OCI registry
    /// and generates the extended-build context on disk (feature payloads, env
    /// files, install wrappers, and `Dockerfile.extended`).
    ///
    /// On success populates `self.root_image`, `self.features`, and
    /// `self.features_build_info`. Requires variable expansion to have run.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        // Variables (localEnv, workspace paths, ...) must be expanded before
        // any paths or feature refs from the config are used.
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // Timestamped scratch dir so successive builds don't collide; the
        // empty folder serves as a no-op docker build context.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        // The image tag is derived from the Dockerfile path so the same build
        // location maps to the same tag.
        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        // Feature install scripts source these _CONTAINER_USER/_REMOTE_USER
        // values from this generated env file.
        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor overrideFeatureInstallOrder from the config when ordering.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // `"feature": false` disables the feature entirely.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Each feature gets its own dir named `<id>_<install index>` to
            // keep repeated features distinct.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Resolve the feature ref, authenticate, fetch its manifest, and
            // download the (single-layer) tarball into the feature dir.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is used; dev container features publish
            // their payload as a single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // A valid feature payload must carry its own metadata file.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain ${...} variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_value(contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the per-feature option env file, then a wrapper script
            // that sources it around the feature's install script.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        let is_compose = match dev_container.build_type() {
            DevContainerBuildType::DockerCompose => true,
            _ => false,
        };
        // Non-compose builds always use BuildKit; compose builds only if the
        // docker client supports it there.
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let build_target = if is_compose {
            find_primary_service(&self.docker_compose_manifest().await?, self)?
                .1
                .build
                .and_then(|b| b.target)
        } else {
            dev_container.build.as_ref().and_then(|b| b.target.clone())
        };

        // Alias the user's final build stage so the extended Dockerfile can
        // refer to it by a known name.
        let dockerfile_content = dockerfile_base_content
            .map(|content| {
                dockerfile_inject_alias(
                    &content,
                    "dev_container_auto_added_stage_label",
                    build_target,
                )
            })
            .unwrap_or_default();

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
621
    /// Renders `Dockerfile.extended`: the user's Dockerfile content (if any)
    /// followed by generated stages that copy feature payloads into the image
    /// and run each feature's install layer.
    ///
    /// `use_buildkit` switches between BuildKit additional-context copying and
    /// a classic extra `FROM` stage for the feature content.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: String,
        use_buildkit: bool,
    ) -> String {
        // UID remapping is a POSIX-only concern; never attempted on Windows.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One generated Dockerfile fragment per downloaded feature, in order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that look up each user's passwd entry (home dir is
        // extracted from field 6 below).
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Without BuildKit, feature content is staged via an extra image
        // (`dev_container_feature_content_temp`) instead of a build context.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
713
714 fn build_merged_resources(
715 &self,
716 base_image: DockerInspect,
717 ) -> Result<DockerBuildResources, DevContainerError> {
718 let dev_container = match &self.config {
719 ConfigStatus::Deserialized(_) => {
720 log::error!(
721 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
722 );
723 return Err(DevContainerError::DevContainerParseFailed);
724 }
725 ConfigStatus::VariableParsed(dev_container) => dev_container,
726 };
727 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
728
729 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
730
731 mounts.append(&mut feature_mounts);
732
733 let privileged = dev_container.privileged.unwrap_or(false)
734 || self.features.iter().any(|f| f.privileged());
735
736 let mut entrypoint_script_lines = vec![
737 "echo Container started".to_string(),
738 "trap \"exit 0\" 15".to_string(),
739 ];
740
741 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
742 entrypoint_script_lines.push(entrypoint.clone());
743 }
744 entrypoint_script_lines.append(&mut vec![
745 "exec \"$@\"".to_string(),
746 "while sleep 1 & wait $!; do :; done".to_string(),
747 ]);
748
749 Ok(DockerBuildResources {
750 image: base_image,
751 additional_mounts: mounts,
752 privileged,
753 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
754 })
755 }
756
757 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
758 if let ConfigStatus::Deserialized(_) = &self.config {
759 log::error!(
760 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
761 );
762 return Err(DevContainerError::DevContainerParseFailed);
763 }
764 let dev_container = self.dev_container();
765 match dev_container.build_type() {
766 DevContainerBuildType::Image(base_image) => {
767 let built_docker_image = self.build_docker_image().await?;
768
769 let built_docker_image = self
770 .update_remote_user_uid(built_docker_image, &base_image)
771 .await?;
772
773 let resources = self.build_merged_resources(built_docker_image)?;
774 Ok(DevContainerBuildResources::Docker(resources))
775 }
776 DevContainerBuildType::Dockerfile(_) => {
777 let built_docker_image = self.build_docker_image().await?;
778 let Some(features_build_info) = &self.features_build_info else {
779 log::error!(
780 "Can't attempt to build update UID dockerfile before initial docker build"
781 );
782 return Err(DevContainerError::DevContainerParseFailed);
783 };
784 let built_docker_image = self
785 .update_remote_user_uid(built_docker_image, &features_build_info.image_tag)
786 .await?;
787
788 let resources = self.build_merged_resources(built_docker_image)?;
789 Ok(DevContainerBuildResources::Docker(resources))
790 }
791 DevContainerBuildType::DockerCompose => {
792 log::debug!("Using docker compose. Building extended compose files");
793 let docker_compose_resources = self.build_and_extend_compose_files().await?;
794
795 return Ok(DevContainerBuildResources::DockerCompose(
796 docker_compose_resources,
797 ));
798 }
799 DevContainerBuildType::None => {
800 return Err(DevContainerError::DevContainerParseFailed);
801 }
802 }
803 }
804
805 async fn run_dev_container(
806 &self,
807 build_resources: DevContainerBuildResources,
808 ) -> Result<DevContainerUp, DevContainerError> {
809 let ConfigStatus::VariableParsed(_) = &self.config else {
810 log::error!(
811 "Variables have not been parsed; cannot proceed with running the dev container"
812 );
813 return Err(DevContainerError::DevContainerParseFailed);
814 };
815 let running_container = match build_resources {
816 DevContainerBuildResources::DockerCompose(resources) => {
817 self.run_docker_compose(resources).await?
818 }
819 DevContainerBuildResources::Docker(resources) => {
820 self.run_docker_image(resources).await?
821 }
822 };
823
824 let remote_user = get_remote_user_from_config(&running_container, self)?;
825 let remote_workspace_folder = self.remote_workspace_folder()?;
826
827 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
828
829 Ok(DevContainerUp {
830 container_id: running_container.id,
831 remote_user,
832 remote_workspace_folder: remote_workspace_folder.display().to_string(),
833 extension_ids: self.extension_ids(),
834 remote_env,
835 })
836 }
837
838 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
839 let dev_container = match &self.config {
840 ConfigStatus::Deserialized(_) => {
841 log::error!(
842 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
843 );
844 return Err(DevContainerError::DevContainerParseFailed);
845 }
846 ConfigStatus::VariableParsed(dev_container) => dev_container,
847 };
848 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
849 return Err(DevContainerError::DevContainerParseFailed);
850 };
851 let docker_compose_full_paths = docker_compose_files
852 .iter()
853 .map(|relative| self.config_directory.join(relative))
854 .collect::<Vec<PathBuf>>();
855
856 let Some(config) = self
857 .docker_client
858 .get_docker_compose_config(&docker_compose_full_paths)
859 .await?
860 else {
861 log::error!("Output could not deserialize into DockerComposeConfig");
862 return Err(DevContainerError::DevContainerParseFailed);
863 };
864 Ok(DockerComposeResources {
865 files: docker_compose_full_paths,
866 config,
867 })
868 }
869
870 async fn build_and_extend_compose_files(
871 &self,
872 ) -> Result<DockerComposeResources, DevContainerError> {
873 let dev_container = match &self.config {
874 ConfigStatus::Deserialized(_) => {
875 log::error!(
876 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
877 );
878 return Err(DevContainerError::DevContainerParseFailed);
879 }
880 ConfigStatus::VariableParsed(dev_container) => dev_container,
881 };
882
883 let Some(features_build_info) = &self.features_build_info else {
884 log::error!(
885 "Cannot build and extend compose files: features build info is not yet constructed"
886 );
887 return Err(DevContainerError::DevContainerParseFailed);
888 };
889 let mut docker_compose_resources = self.docker_compose_manifest().await?;
890 let supports_buildkit = self.docker_client.supports_compose_buildkit();
891
892 let (main_service_name, main_service) =
893 find_primary_service(&docker_compose_resources, self)?;
894 let (built_service_image, built_service_image_tag) = if main_service
895 .build
896 .as_ref()
897 .map(|b| b.dockerfile.as_ref())
898 .is_some()
899 {
900 if !supports_buildkit {
901 self.build_feature_content_image().await?;
902 }
903
904 let dockerfile_path = &features_build_info.dockerfile_path;
905
906 let build_args = if !supports_buildkit {
907 HashMap::from([
908 (
909 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
910 "dev_container_auto_added_stage_label".to_string(),
911 ),
912 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
913 ])
914 } else {
915 HashMap::from([
916 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
917 (
918 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
919 "dev_container_auto_added_stage_label".to_string(),
920 ),
921 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
922 ])
923 };
924
925 let additional_contexts = if !supports_buildkit {
926 None
927 } else {
928 Some(HashMap::from([(
929 "dev_containers_feature_content_source".to_string(),
930 features_build_info
931 .features_content_dir
932 .display()
933 .to_string(),
934 )]))
935 };
936
937 let build_override = DockerComposeConfig {
938 name: None,
939 services: HashMap::from([(
940 main_service_name.clone(),
941 DockerComposeService {
942 image: Some(features_build_info.image_tag.clone()),
943 entrypoint: None,
944 cap_add: None,
945 security_opt: None,
946 labels: None,
947 build: Some(DockerComposeServiceBuild {
948 context: Some(
949 main_service
950 .build
951 .as_ref()
952 .and_then(|b| b.context.clone())
953 .unwrap_or_else(|| {
954 features_build_info.empty_context_dir.display().to_string()
955 }),
956 ),
957 dockerfile: Some(dockerfile_path.display().to_string()),
958 target: Some("dev_containers_target_stage".to_string()),
959 args: Some(build_args),
960 additional_contexts,
961 }),
962 volumes: Vec::new(),
963 ..Default::default()
964 },
965 )]),
966 volumes: HashMap::new(),
967 };
968
969 let temp_base = std::env::temp_dir().join("devcontainer-zed");
970 let config_location = temp_base.join("docker_compose_build.json");
971
972 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
973 log::error!("Error serializing docker compose runtime override: {e}");
974 DevContainerError::DevContainerParseFailed
975 })?;
976
977 self.fs
978 .write(&config_location, config_json.as_bytes())
979 .await
980 .map_err(|e| {
981 log::error!("Error writing the runtime override file: {e}");
982 DevContainerError::FilesystemError
983 })?;
984
985 docker_compose_resources.files.push(config_location);
986
987 self.docker_client
988 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
989 .await?;
990 (
991 self.docker_client
992 .inspect(&features_build_info.image_tag)
993 .await?,
994 &features_build_info.image_tag,
995 )
996 } else if let Some(image) = &main_service.image {
997 if dev_container
998 .features
999 .as_ref()
1000 .is_none_or(|features| features.is_empty())
1001 {
1002 (self.docker_client.inspect(image).await?, image)
1003 } else {
1004 if !supports_buildkit {
1005 self.build_feature_content_image().await?;
1006 }
1007
1008 let dockerfile_path = &features_build_info.dockerfile_path;
1009
1010 let build_args = if !supports_buildkit {
1011 HashMap::from([
1012 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
1013 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
1014 ])
1015 } else {
1016 HashMap::from([
1017 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
1018 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
1019 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
1020 ])
1021 };
1022
1023 let additional_contexts = if !supports_buildkit {
1024 None
1025 } else {
1026 Some(HashMap::from([(
1027 "dev_containers_feature_content_source".to_string(),
1028 features_build_info
1029 .features_content_dir
1030 .display()
1031 .to_string(),
1032 )]))
1033 };
1034
1035 let build_override = DockerComposeConfig {
1036 name: None,
1037 services: HashMap::from([(
1038 main_service_name.clone(),
1039 DockerComposeService {
1040 image: Some(features_build_info.image_tag.clone()),
1041 entrypoint: None,
1042 cap_add: None,
1043 security_opt: None,
1044 labels: None,
1045 build: Some(DockerComposeServiceBuild {
1046 context: Some(
1047 features_build_info.empty_context_dir.display().to_string(),
1048 ),
1049 dockerfile: Some(dockerfile_path.display().to_string()),
1050 target: Some("dev_containers_target_stage".to_string()),
1051 args: Some(build_args),
1052 additional_contexts,
1053 }),
1054 volumes: Vec::new(),
1055 ..Default::default()
1056 },
1057 )]),
1058 volumes: HashMap::new(),
1059 };
1060
1061 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1062 let config_location = temp_base.join("docker_compose_build.json");
1063
1064 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
1065 log::error!("Error serializing docker compose runtime override: {e}");
1066 DevContainerError::DevContainerParseFailed
1067 })?;
1068
1069 self.fs
1070 .write(&config_location, config_json.as_bytes())
1071 .await
1072 .map_err(|e| {
1073 log::error!("Error writing the runtime override file: {e}");
1074 DevContainerError::FilesystemError
1075 })?;
1076
1077 docker_compose_resources.files.push(config_location);
1078
1079 self.docker_client
1080 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
1081 .await?;
1082
1083 (
1084 self.docker_client
1085 .inspect(&features_build_info.image_tag)
1086 .await?,
1087 &features_build_info.image_tag,
1088 )
1089 }
1090 } else {
1091 log::error!("Docker compose must have either image or dockerfile defined");
1092 return Err(DevContainerError::DevContainerParseFailed);
1093 };
1094
1095 let built_service_image = self
1096 .update_remote_user_uid(built_service_image, built_service_image_tag)
1097 .await?;
1098
1099 let resources = self.build_merged_resources(built_service_image)?;
1100
1101 let network_mode = main_service.network_mode.as_ref();
1102 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1103 let runtime_override_file = self
1104 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1105 .await?;
1106
1107 docker_compose_resources.files.push(runtime_override_file);
1108
1109 Ok(docker_compose_resources)
1110 }
1111
1112 async fn write_runtime_override_file(
1113 &self,
1114 main_service_name: &str,
1115 network_mode_service: Option<&str>,
1116 resources: DockerBuildResources,
1117 ) -> Result<PathBuf, DevContainerError> {
1118 let config =
1119 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1120 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1121 let config_location = temp_base.join("docker_compose_runtime.json");
1122
1123 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1124 log::error!("Error serializing docker compose runtime override: {e}");
1125 DevContainerError::DevContainerParseFailed
1126 })?;
1127
1128 self.fs
1129 .write(&config_location, config_json.as_bytes())
1130 .await
1131 .map_err(|e| {
1132 log::error!("Error writing the runtime override file: {e}");
1133 DevContainerError::FilesystemError
1134 })?;
1135
1136 Ok(config_location)
1137 }
1138
    /// Builds the docker-compose override config applied at `up` time: labels
    /// identifying the dev container, volume declarations for any
    /// volume-typed mounts, the entrypoint wrapper for the main service, and
    /// port forwards (routed to the service that actually owns the network
    /// namespace when `network_mode: service:<name>` is in use).
    ///
    /// `network_mode_service` is the `<name>` part of the main service's
    /// `network_mode: service:<name>` setting, if any.
    fn build_runtime_override(
        &self,
        main_service_name: &str,
        network_mode_service: Option<&str>,
        resources: DockerBuildResources,
    ) -> Result<DockerComposeConfig, DevContainerError> {
        let mut runtime_labels = HashMap::new();

        // Propagate the image's devcontainer metadata onto the container as a
        // label, so later inspections can recover the merged configuration.
        if let Some(metadata) = &resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Error serializing docker image metadata: {e}");
                DevContainerError::ContainerNotValid(resources.image.id.clone())
            })?;

            runtime_labels.insert("devcontainer.metadata".to_string(), serialized_metadata);
        }

        // Labels used elsewhere to re-discover this project's container.
        for (k, v) in self.identifying_labels() {
            runtime_labels.insert(k.to_string(), v.to_string());
        }

        // Every mount of type "volume" needs a matching top-level volume
        // declaration in the compose file; keyed (and named) by its source.
        let config_volumes: HashMap<String, DockerComposeVolume> = resources
            .additional_mounts
            .iter()
            .filter_map(|mount| {
                if let Some(mount_type) = &mount.mount_type
                    && mount_type.to_lowercase() == "volume"
                    && let Some(source) = &mount.source
                {
                    Some((
                        source.clone(),
                        DockerComposeVolume {
                            name: source.clone(),
                        },
                    ))
                } else {
                    None
                }
            })
            .collect();

        // All additional mounts (bind and volume alike) attach to the main
        // service.
        let volumes: Vec<MountDefinition> = resources
            .additional_mounts
            .iter()
            .map(|v| MountDefinition {
                source: v.source.clone(),
                target: v.target.clone(),
                mount_type: v.mount_type.clone(),
            })
            .collect();

        // The entrypoint wrapper keeps the container alive and runs the
        // generated script via `/bin/sh -c <script> -`.
        let mut main_service = DockerComposeService {
            entrypoint: Some(vec![
                "/bin/sh".to_string(),
                "-c".to_string(),
                resources.entrypoint_script,
                "-".to_string(),
            ]),
            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
            labels: Some(runtime_labels),
            volumes,
            privileged: Some(resources.privileged),
            ..Default::default()
        };
        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            // Ports destined for the main service: bare numbers, bare
            // "port" strings, and "<main_service>:port" strings.
            let main_service_ports: Vec<String> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(port) => Some(port.to_string()),
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() <= 1 {
                            Some(port.to_string())
                        } else if parts.len() == 2 {
                            if parts[0] == main_service_name {
                                Some(parts[1].to_string())
                            } else {
                                None
                            }
                        } else {
                            // More than one ':' — not a recognized form.
                            None
                        }
                    }
                })
                .collect();
            for port in main_service_ports {
                // If the main service uses a different service's network bridge, append to that service's ports instead
                if let Some(network_service_name) = network_mode_service {
                    if let Some(service) = service_declarations.get_mut(network_service_name) {
                        service.ports.push(DockerComposeServicePort {
                            target: port.clone(),
                            published: port.clone(),
                            ..Default::default()
                        });
                    } else {
                        service_declarations.insert(
                            network_service_name.to_string(),
                            DockerComposeService {
                                ports: vec![DockerComposeServicePort {
                                    target: port.clone(),
                                    published: port.clone(),
                                    ..Default::default()
                                }],
                                ..Default::default()
                            },
                        );
                    }
                } else {
                    main_service.ports.push(DockerComposeServicePort {
                        target: port.clone(),
                        published: port.clone(),
                        ..Default::default()
                    });
                }
            }
            // "service:port" forwards aimed at services other than the main
            // one get their own (merged-in) service declarations.
            let other_service_ports: Vec<(&str, &str)> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(_) => None,
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() != 2 {
                            None
                        } else {
                            if parts[0] == main_service_name {
                                None
                            } else {
                                Some((parts[0], parts[1]))
                            }
                        }
                    }
                })
                .collect();
            for (service_name, port) in other_service_ports {
                if let Some(service) = service_declarations.get_mut(service_name) {
                    service.ports.push(DockerComposeServicePort {
                        target: port.to_string(),
                        published: port.to_string(),
                        ..Default::default()
                    });
                } else {
                    service_declarations.insert(
                        service_name.to_string(),
                        DockerComposeService {
                            ports: vec![DockerComposeServicePort {
                                target: port.to_string(),
                                published: port.to_string(),
                                ..Default::default()
                            }],
                            ..Default::default()
                        },
                    );
                }
            }
        }

        // Inserted last so a network-mode target service can never collide
        // with (and overwrite) the fully-populated main service entry.
        service_declarations.insert(main_service_name.to_string(), main_service);
        let new_docker_compose_config = DockerComposeConfig {
            name: None,
            services: service_declarations,
            volumes: config_volumes,
        };

        Ok(new_docker_compose_config)
    }
1307
1308 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1309 let dev_container = match &self.config {
1310 ConfigStatus::Deserialized(_) => {
1311 log::error!(
1312 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1313 );
1314 return Err(DevContainerError::DevContainerParseFailed);
1315 }
1316 ConfigStatus::VariableParsed(dev_container) => dev_container,
1317 };
1318
1319 match dev_container.build_type() {
1320 DevContainerBuildType::Image(image_tag) => {
1321 let base_image = self.docker_client.inspect(&image_tag).await?;
1322 if dev_container
1323 .features
1324 .as_ref()
1325 .is_none_or(|features| features.is_empty())
1326 {
1327 log::debug!("No features to add. Using base image");
1328 return Ok(base_image);
1329 }
1330 }
1331 DevContainerBuildType::Dockerfile(_) => {}
1332 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1333 return Err(DevContainerError::DevContainerParseFailed);
1334 }
1335 };
1336
1337 let mut command = self.create_docker_build()?;
1338
1339 let output = self
1340 .command_runner
1341 .run_command(&mut command)
1342 .await
1343 .map_err(|e| {
1344 log::error!("Error building docker image: {e}");
1345 DevContainerError::CommandFailed(command.get_program().display().to_string())
1346 })?;
1347
1348 if !output.status.success() {
1349 let stderr = String::from_utf8_lossy(&output.stderr);
1350 log::error!("docker buildx build failed: {stderr}");
1351 return Err(DevContainerError::CommandFailed(
1352 command.get_program().display().to_string(),
1353 ));
1354 }
1355
1356 // After a successful build, inspect the newly tagged image to get its metadata
1357 let Some(features_build_info) = &self.features_build_info else {
1358 log::error!("Features build info expected, but not created");
1359 return Err(DevContainerError::DevContainerParseFailed);
1360 };
1361 let image = self
1362 .docker_client
1363 .inspect(&features_build_info.image_tag)
1364 .await?;
1365
1366 Ok(image)
1367 }
1368
    /// No-op on Windows hosts: there is no host UID/GID to align with, so the
    /// image is returned unchanged.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _base_image: &str,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1377 #[cfg(not(target_os = "windows"))]
1378 async fn update_remote_user_uid(
1379 &self,
1380 image: DockerInspect,
1381 base_image: &str,
1382 ) -> Result<DockerInspect, DevContainerError> {
1383 let dev_container = self.dev_container();
1384
1385 let Some(features_build_info) = &self.features_build_info else {
1386 return Ok(image);
1387 };
1388
1389 // updateRemoteUserUID defaults to true per the devcontainers spec
1390 if dev_container.update_remote_user_uid == Some(false) {
1391 return Ok(image);
1392 }
1393
1394 let remote_user = get_remote_user_from_config(&image, self)?;
1395 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1396 return Ok(image);
1397 }
1398
1399 let image_user = image
1400 .config
1401 .image_user
1402 .as_deref()
1403 .unwrap_or("root")
1404 .to_string();
1405
1406 let host_uid = Command::new("id")
1407 .arg("-u")
1408 .output()
1409 .await
1410 .map_err(|e| {
1411 log::error!("Failed to get host UID: {e}");
1412 DevContainerError::CommandFailed("id -u".to_string())
1413 })
1414 .and_then(|output| {
1415 String::from_utf8_lossy(&output.stdout)
1416 .trim()
1417 .parse::<u32>()
1418 .map_err(|e| {
1419 log::error!("Failed to parse host UID: {e}");
1420 DevContainerError::CommandFailed("id -u".to_string())
1421 })
1422 })?;
1423
1424 let host_gid = Command::new("id")
1425 .arg("-g")
1426 .output()
1427 .await
1428 .map_err(|e| {
1429 log::error!("Failed to get host GID: {e}");
1430 DevContainerError::CommandFailed("id -g".to_string())
1431 })
1432 .and_then(|output| {
1433 String::from_utf8_lossy(&output.stdout)
1434 .trim()
1435 .parse::<u32>()
1436 .map_err(|e| {
1437 log::error!("Failed to parse host GID: {e}");
1438 DevContainerError::CommandFailed("id -g".to_string())
1439 })
1440 })?;
1441
1442 let dockerfile_content = self.generate_update_uid_dockerfile();
1443
1444 let dockerfile_path = features_build_info
1445 .features_content_dir
1446 .join("updateUID.Dockerfile");
1447 self.fs
1448 .write(&dockerfile_path, dockerfile_content.as_bytes())
1449 .await
1450 .map_err(|e| {
1451 log::error!("Failed to write updateUID Dockerfile: {e}");
1452 DevContainerError::FilesystemError
1453 })?;
1454
1455 let updated_image_tag = features_build_info.image_tag.clone();
1456
1457 let mut command = Command::new(self.docker_client.docker_cli());
1458 command.args(["build"]);
1459 command.args(["-f", &dockerfile_path.display().to_string()]);
1460 command.args(["-t", &updated_image_tag]);
1461 command.args(["--build-arg", &format!("BASE_IMAGE={}", base_image)]);
1462 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1463 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1464 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1465 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1466 command.arg(features_build_info.empty_context_dir.display().to_string());
1467
1468 let output = self
1469 .command_runner
1470 .run_command(&mut command)
1471 .await
1472 .map_err(|e| {
1473 log::error!("Error building UID update image: {e}");
1474 DevContainerError::CommandFailed(command.get_program().display().to_string())
1475 })?;
1476
1477 if !output.status.success() {
1478 let stderr = String::from_utf8_lossy(&output.stderr);
1479 log::error!("UID update build failed: {stderr}");
1480 return Err(DevContainerError::CommandFailed(
1481 command.get_program().display().to_string(),
1482 ));
1483 }
1484
1485 self.docker_client.inspect(&updated_image_tag).await
1486 }
1487
1488 #[cfg(not(target_os = "windows"))]
1489 fn generate_update_uid_dockerfile(&self) -> String {
1490 let mut dockerfile = r#"ARG BASE_IMAGE
1491FROM $BASE_IMAGE
1492
1493USER root
1494
1495ARG REMOTE_USER
1496ARG NEW_UID
1497ARG NEW_GID
1498SHELL ["/bin/sh", "-c"]
1499RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1500 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1501 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1502 if [ -z "$OLD_UID" ]; then \
1503 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1504 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1505 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1506 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1507 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1508 else \
1509 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1510 FREE_GID=65532; \
1511 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1512 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1513 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1514 fi; \
1515 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1516 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1517 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1518 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1519 fi; \
1520 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1521 fi;
1522
1523ARG IMAGE_USER
1524USER $IMAGE_USER
1525
1526# Ensure that /etc/profile does not clobber the existing path
1527RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1528"#.to_string();
1529 for feature in &self.features {
1530 let container_env_layer = feature.generate_dockerfile_env();
1531 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1532 }
1533
1534 if let Some(env) = &self.dev_container().container_env {
1535 for (key, value) in env {
1536 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1537 }
1538 }
1539 dockerfile
1540 }
1541
1542 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1543 let Some(features_build_info) = &self.features_build_info else {
1544 log::error!("Features build info not available for building feature content image");
1545 return Err(DevContainerError::DevContainerParseFailed);
1546 };
1547 let features_content_dir = &features_build_info.features_content_dir;
1548
1549 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1550 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1551
1552 self.fs
1553 .write(&dockerfile_path, dockerfile_content.as_bytes())
1554 .await
1555 .map_err(|e| {
1556 log::error!("Failed to write feature content Dockerfile: {e}");
1557 DevContainerError::FilesystemError
1558 })?;
1559
1560 let mut command = Command::new(self.docker_client.docker_cli());
1561 command.args([
1562 "build",
1563 "-t",
1564 "dev_container_feature_content_temp",
1565 "-f",
1566 &dockerfile_path.display().to_string(),
1567 &features_content_dir.display().to_string(),
1568 ]);
1569
1570 let output = self
1571 .command_runner
1572 .run_command(&mut command)
1573 .await
1574 .map_err(|e| {
1575 log::error!("Error building feature content image: {e}");
1576 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1577 })?;
1578
1579 if !output.status.success() {
1580 let stderr = String::from_utf8_lossy(&output.stderr);
1581 log::error!("Feature content image build failed: {stderr}");
1582 return Err(DevContainerError::CommandFailed(
1583 self.docker_client.docker_cli(),
1584 ));
1585 }
1586
1587 Ok(())
1588 }
1589
    /// Assembles the `docker buildx build` command that layers the configured
    /// features onto the base image (mirroring the devcontainers CLI
    /// reference implementation's build options). Requires variable
    /// expansion and features build info to have completed.
    ///
    /// Note: argument order matters to the docker CLI — all flags must
    /// precede the trailing positional build-context path.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            // Dockerfile builds reference the auto-added stage label instead
            // of a concrete image tag.
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // User to switch back to after feature layers run; falls back to root
        // when the root image (or its configured user) is unknown.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // User-specified build.args from devcontainer.json.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        // Raw extra CLI options from build.options, passed through verbatim.
        if let Some(options) = dev_container
            .build
            .as_ref()
            .and_then(|b| b.options.as_ref())
        {
            for option in options {
                command.arg(option);
            }
        }

        if let Some(cache_from_images) = dev_container
            .build
            .as_ref()
            .and_then(|b| b.cache_from.as_ref())
        {
            for cache_from_image in cache_from_images {
                command.args(["--cache-from", cache_from_image]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if let DevContainerBuildType::Dockerfile(build) = dev_container.build_type() {
            command.arg(self.calculate_context_dir(build).display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1698
1699 async fn run_docker_compose(
1700 &self,
1701 resources: DockerComposeResources,
1702 ) -> Result<DockerInspect, DevContainerError> {
1703 let mut command = Command::new(self.docker_client.docker_cli());
1704 command.args(&["compose", "--project-name", &self.project_name()]);
1705 for docker_compose_file in resources.files {
1706 command.args(&["-f", &docker_compose_file.display().to_string()]);
1707 }
1708 command.args(&["up", "-d"]);
1709
1710 let output = self
1711 .command_runner
1712 .run_command(&mut command)
1713 .await
1714 .map_err(|e| {
1715 log::error!("Error running docker compose up: {e}");
1716 DevContainerError::CommandFailed(command.get_program().display().to_string())
1717 })?;
1718
1719 if !output.status.success() {
1720 let stderr = String::from_utf8_lossy(&output.stderr);
1721 log::error!("Non-success status from docker compose up: {}", stderr);
1722 return Err(DevContainerError::CommandFailed(
1723 command.get_program().display().to_string(),
1724 ));
1725 }
1726
1727 if let Some(docker_ps) = self.check_for_existing_container().await? {
1728 log::debug!("Found newly created dev container");
1729 return self.docker_client.inspect(&docker_ps.id).await;
1730 }
1731
1732 log::error!("Could not find existing container after docker compose up");
1733
1734 Err(DevContainerError::DevContainerParseFailed)
1735 }
1736
1737 async fn run_docker_image(
1738 &self,
1739 build_resources: DockerBuildResources,
1740 ) -> Result<DockerInspect, DevContainerError> {
1741 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1742
1743 let output = self
1744 .command_runner
1745 .run_command(&mut docker_run_command)
1746 .await
1747 .map_err(|e| {
1748 log::error!("Error running docker run: {e}");
1749 DevContainerError::CommandFailed(
1750 docker_run_command.get_program().display().to_string(),
1751 )
1752 })?;
1753
1754 if !output.status.success() {
1755 let std_err = String::from_utf8_lossy(&output.stderr);
1756 log::error!("Non-success status from docker run. StdErr: {std_err}");
1757 return Err(DevContainerError::CommandFailed(
1758 docker_run_command.get_program().display().to_string(),
1759 ));
1760 }
1761
1762 log::debug!("Checking for container that was started");
1763 let Some(docker_ps) = self.check_for_existing_container().await? else {
1764 log::error!("Could not locate container just created");
1765 return Err(DevContainerError::DevContainerParseFailed);
1766 };
1767 self.docker_client.inspect(&docker_ps.id).await
1768 }
1769
1770 fn local_workspace_folder(&self) -> String {
1771 self.local_project_directory.display().to_string()
1772 }
1773 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1774 self.local_project_directory
1775 .file_name()
1776 .map(|f| f.display().to_string())
1777 .ok_or(DevContainerError::DevContainerParseFailed)
1778 }
1779
1780 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1781 self.dev_container()
1782 .workspace_folder
1783 .as_ref()
1784 .map(|folder| PathBuf::from(folder))
1785 .or(Some(
1786 // We explicitly use "/" here, instead of PathBuf::join
1787 // because we want remote targets to use unix-style filepaths,
1788 // even on a Windows host
1789 PathBuf::from(format!(
1790 "{}/{}",
1791 DEFAULT_REMOTE_PROJECT_DIR,
1792 self.local_workspace_base_name()?
1793 )),
1794 ))
1795 .ok_or(DevContainerError::DevContainerParseFailed)
1796 }
1797 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1798 self.remote_workspace_folder().and_then(|f| {
1799 f.file_name()
1800 .map(|file_name| file_name.display().to_string())
1801 .ok_or(DevContainerError::DevContainerParseFailed)
1802 })
1803 }
1804
1805 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1806 if let Some(mount) = &self.dev_container().workspace_mount {
1807 return Ok(mount.clone());
1808 }
1809 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1810 return Err(DevContainerError::DevContainerParseFailed);
1811 };
1812
1813 Ok(MountDefinition {
1814 source: Some(self.local_workspace_folder()),
1815 // We explicitly use "/" here, instead of PathBuf::join
1816 // because we want the remote target to use unix-style filepaths,
1817 // even on a Windows host
1818 target: format!(
1819 "{}/{}",
1820 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).display(),
1821 project_directory_name.display()
1822 ),
1823 mount_type: None,
1824 })
1825 }
1826
    /// Assembles the `docker run` command for a non-compose dev container:
    /// user runArgs, workspace and additional mounts, identifying and
    /// metadata labels, forwarded/app ports, and finally the entrypoint
    /// wrapper. Argument order matters: all flags must precede the image ID,
    /// and everything after the image is the in-container command.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // User-provided runArgs from devcontainer.json, passed through as-is.
        let run_args = match &self.dev_container().run_args {
            Some(run_args) => run_args,
            None => &Vec::new(),
        };

        for arg in run_args {
            command.arg(arg);
        }

        // Adds `arg` unless the user already supplied a runArg starting with
        // `arg_name`. NOTE(review): this is a prefix match, so e.g. checking
        // "--userns" would also be satisfied by "--userns-remap"; confirm
        // that is acceptable for the flags used below.
        let run_if_missing = {
            |arg_name: &str, arg: &str, command: &mut Command| {
                if !run_args
                    .iter()
                    .any(|arg| arg.strip_prefix(arg_name).is_some())
                {
                    command.arg(arg);
                }
            }
        };

        // Podman defaults differ from docker: disable SELinux labeling and
        // keep the host UID mapping unless the user overrode them.
        if &docker_cli == "podman" {
            run_if_missing(
                "--security-opt",
                "--security-opt=label=disable",
                &mut command,
            );
            run_if_missing("--userns", "--userns=keep-id", &mut command);
        }

        run_if_missing("--sig-proxy", "--sig-proxy=false", &mut command);
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used elsewhere to re-discover this project's container.
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Propagate the image's devcontainer metadata onto the container.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only numeric forwardPorts map to -p here; "service:port" strings
        // are a compose-only concept.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        for app_port in &self.dev_container().app_port {
            command.arg("-p");
            command.arg(app_port);
        }

        // Image ID, then the in-container command: run the entrypoint script
        // via `/bin/sh -c <script> -`.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1921
1922 fn extension_ids(&self) -> Vec<String> {
1923 self.dev_container()
1924 .customizations
1925 .as_ref()
1926 .map(|c| c.zed.extensions.clone())
1927 .unwrap_or_default()
1928 }
1929
    /// Full "up" pipeline for a new container: validate the config, run the
    /// host-side initialize commands, download feature/Dockerfile resources,
    /// build, start the container, then run the in-container lifecycle
    /// scripts. Each step depends on the previous one's side effects.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        self.dev_container().validate_devcontainer_contents()?;

        // Host-side initializeCommand runs before anything touches docker.
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true` = newly created container, so the create-time lifecycle
        // scripts run in addition to the attach-time ones.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1945
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// docker exec, in spec order: onCreate, updateContent and postCreate /
    /// postStart only when `new_container` is true, then postAttach always.
    /// Each configured command may expand to several named script commands,
    /// executed sequentially; the first failure aborts the sequence.
    ///
    /// NOTE(review): the four loops are structurally identical and differ
    /// only in the config field and log label — a shared helper would
    /// de-duplicate them once the command type is nameable here.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // postAttach runs on every attach, new container or not.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
2033
2034 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
2035 let ConfigStatus::VariableParsed(config) = &self.config else {
2036 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
2037 return Err(DevContainerError::DevContainerParseFailed);
2038 };
2039
2040 if let Some(initialize_command) = &config.initialize_command {
2041 log::debug!("Running initialize command");
2042 initialize_command
2043 .run(&self.command_runner, &self.local_project_directory)
2044 .await
2045 } else {
2046 log::warn!("No initialize command found");
2047 Ok(())
2048 }
2049 }
2050
2051 async fn check_for_existing_devcontainer(
2052 &self,
2053 ) -> Result<Option<DevContainerUp>, DevContainerError> {
2054 if let Some(docker_ps) = self.check_for_existing_container().await? {
2055 log::debug!("Dev container already found. Proceeding with it");
2056
2057 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
2058
2059 if !docker_inspect.is_running() {
2060 log::debug!("Container not running. Will attempt to start, and then proceed");
2061 self.docker_client.start_container(&docker_ps.id).await?;
2062 }
2063
2064 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
2065
2066 let remote_folder = self.remote_workspace_folder()?;
2067
2068 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
2069
2070 let dev_container_up = DevContainerUp {
2071 container_id: docker_ps.id,
2072 remote_user: remote_user,
2073 remote_workspace_folder: remote_folder.display().to_string(),
2074 extension_ids: self.extension_ids(),
2075 remote_env,
2076 };
2077
2078 self.run_remote_scripts(&dev_container_up, false).await?;
2079
2080 Ok(Some(dev_container_up))
2081 } else {
2082 log::debug!("Existing container not found.");
2083
2084 Ok(None)
2085 }
2086 }
2087
2088 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
2089 self.docker_client
2090 .find_process_by_filters(
2091 self.identifying_labels()
2092 .iter()
2093 .map(|(k, v)| format!("label={k}={v}"))
2094 .collect(),
2095 )
2096 .await
2097 }
2098
2099 fn project_name(&self) -> String {
2100 if let Some(name) = &self.dev_container().name {
2101 safe_id_lower(name)
2102 } else {
2103 let alternate_name = &self
2104 .local_workspace_base_name()
2105 .unwrap_or(self.local_workspace_folder());
2106 safe_id_lower(alternate_name)
2107 }
2108 }
2109
    /// Loads the config's Dockerfile and substitutes `${KEY}` references using
    /// the devcontainer's `build.args` (which take precedence) and `ARG key=value`
    /// defaults declared on earlier lines of the Dockerfile itself.
    ///
    /// Returns the expanded Dockerfile text. Fails for an image-type config
    /// (no Dockerfile) or when the file cannot be read.
    async fn expanded_dockerfile_content(&self) -> Result<String, DevContainerError> {
        let Some(dockerfile_path) = self.dockerfile_location().await else {
            log::error!("Tried to expand dockerfile for an image-type config");
            return Err(DevContainerError::DevContainerParseFailed);
        };

        // Build args declared in devcontainer.json under `build.args`.
        let devcontainer_args = self
            .dev_container()
            .build
            .as_ref()
            .and_then(|b| b.args.clone())
            .unwrap_or_default();
        let contents = self.fs.load(&dockerfile_path).await.map_err(|e| {
            log::error!("Failed to load Dockerfile: {e}");
            DevContainerError::FilesystemError
        })?;
        let mut parsed_lines: Vec<String> = Vec::new();
        // `ARG key=value` defaults collected while scanning; they only apply to
        // lines after the declaration, matching Dockerfile ARG scoping.
        let mut inline_args: Vec<(String, String)> = Vec::new();
        // Matches each `key=` on an ARG line (several keys may share one line).
        let key_regex = Regex::new(r"(?:^|\s)(\w+)=").expect("valid regex");

        for line in contents.lines() {
            let mut parsed_line = line.to_string();
            // Replace from devcontainer args first, since they take precedence
            for (key, value) in &devcontainer_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value)
            }
            for (key, value) in &inline_args {
                parsed_line = parsed_line.replace(&format!("${{{key}}}"), value);
            }
            if let Some(arg_directives) = parsed_line.strip_prefix("ARG ") {
                let trimmed = arg_directives.trim();
                let key_matches: Vec<_> = key_regex.captures_iter(trimmed).collect();
                for (i, captures) in key_matches.iter().enumerate() {
                    let key = captures[1].to_string();
                    // Insert the devcontainer overrides here if needed
                    // A key's value runs from just after its `=` up to the start
                    // of the next key match (or the end of the line).
                    let value_start = captures.get(0).expect("full match").end();
                    let value_end = if i + 1 < key_matches.len() {
                        key_matches[i + 1].get(0).expect("full match").start()
                    } else {
                        trimmed.len()
                    };
                    let raw_value = trimmed[value_start..value_end].trim();
                    // Strip one pair of surrounding double quotes, if present.
                    // The `len() > 1` guard keeps a lone `"` from underflowing.
                    let value = if raw_value.starts_with('"')
                        && raw_value.ends_with('"')
                        && raw_value.len() > 1
                    {
                        &raw_value[1..raw_value.len() - 1]
                    } else {
                        raw_value
                    };
                    inline_args.push((key, value.to_string()));
                }
            }
            parsed_lines.push(parsed_line);
        }

        Ok(parsed_lines.join("\n"))
    }
2168
2169 fn calculate_context_dir(&self, build: ContainerBuild) -> PathBuf {
2170 let Some(context) = build.context else {
2171 return self.config_directory.clone();
2172 };
2173 let context_path = PathBuf::from(context);
2174
2175 if context_path.is_absolute() {
2176 context_path
2177 } else {
2178 self.config_directory.join(context_path)
2179 }
2180 }
2181}
2182
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context
    /// (all real inputs arrive via the named build contexts above)
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` when the base image is not yet known
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
2201
2202pub(crate) async fn read_devcontainer_configuration(
2203 config: DevContainerConfig,
2204 context: &DevContainerContext,
2205 environment: HashMap<String, String>,
2206) -> Result<DevContainer, DevContainerError> {
2207 let docker = if context.use_podman {
2208 Docker::new("podman").await
2209 } else {
2210 Docker::new("docker").await
2211 };
2212 let mut dev_container = DevContainerManifest::new(
2213 context,
2214 environment,
2215 Arc::new(docker),
2216 Arc::new(DefaultCommandRunner::new()),
2217 config,
2218 &context.project_directory.as_ref(),
2219 )
2220 .await?;
2221 dev_container.parse_nonremote_vars()?;
2222 Ok(dev_container.dev_container().clone())
2223}
2224
2225pub(crate) async fn spawn_dev_container(
2226 context: &DevContainerContext,
2227 environment: HashMap<String, String>,
2228 config: DevContainerConfig,
2229 local_project_path: &Path,
2230) -> Result<DevContainerUp, DevContainerError> {
2231 let docker = if context.use_podman {
2232 Docker::new("podman").await
2233 } else {
2234 Docker::new("docker").await
2235 };
2236 let mut devcontainer_manifest = DevContainerManifest::new(
2237 context,
2238 environment,
2239 Arc::new(docker),
2240 Arc::new(DefaultCommandRunner::new()),
2241 config,
2242 local_project_path,
2243 )
2244 .await?;
2245
2246 devcontainer_manifest.parse_nonremote_vars()?;
2247
2248 log::debug!("Checking for existing container");
2249 if let Some(devcontainer) = devcontainer_manifest
2250 .check_for_existing_devcontainer()
2251 .await?
2252 {
2253 Ok(devcontainer)
2254 } else {
2255 log::debug!("Existing container not found. Building");
2256
2257 devcontainer_manifest.build_and_run().await
2258 }
2259}
2260
/// Inputs for running a single-container (non-compose) devcontainer.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect data for the image the container will be created from.
    image: DockerInspect,
    // Mounts beyond the default workspace mount.
    additional_mounts: Vec<MountDefinition>,
    // Whether the container should be run with elevated privileges.
    privileged: bool,
    // Shell script used as the container entrypoint; presumably keeps the
    // container alive (see the sleep/wait loop in the run-command test).
    entrypoint_script: String,
}
2268
/// Build resources, split by whether the devcontainer is docker-compose
/// based or a single docker container.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2274
2275fn find_primary_service(
2276 docker_compose: &DockerComposeResources,
2277 devcontainer: &DevContainerManifest,
2278) -> Result<(String, DockerComposeService), DevContainerError> {
2279 let Some(service_name) = &devcontainer.dev_container().service else {
2280 return Err(DevContainerError::DevContainerParseFailed);
2281 };
2282
2283 match docker_compose.config.services.get(service_name) {
2284 Some(service) => Ok((service_name.clone(), service.clone())),
2285 None => Err(DevContainerError::DevContainerParseFailed),
2286 }
2287}
2288
2289/// Resolves a compose service's dockerfile path according to the Docker Compose spec:
2290/// `dockerfile` is relative to the build `context`, and `context` is relative to
2291/// the compose file's directory.
2292fn resolve_compose_dockerfile(
2293 compose_file: &Path,
2294 context: Option<&str>,
2295 dockerfile: &str,
2296) -> Option<PathBuf> {
2297 let dockerfile = PathBuf::from(dockerfile);
2298 if dockerfile.is_absolute() {
2299 return Some(dockerfile);
2300 }
2301 let compose_dir = compose_file.parent()?;
2302 let context_dir = match context {
2303 Some(ctx) => {
2304 let ctx = PathBuf::from(ctx);
2305 if ctx.is_absolute() {
2306 ctx
2307 } else {
2308 normalize_path(&compose_dir.join(ctx))
2309 }
2310 }
2311 None => compose_dir.to_path_buf(),
2312 };
2313 Some(context_dir.join(dockerfile))
2314}
2315
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`. The generated install
/// wrapper sources env files relative to per-feature subdirectories of this path.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2319
/// Escapes regex special characters in a string by backslash-prefixing them.
fn escape_regex_chars(input: &str) -> String {
    const SPECIAL: &str = ".*+?^${}()|[]\\";
    // Worst case every char needs an escape, so reserve double the length.
    input
        .chars()
        .fold(String::with_capacity(input.len() * 2), |mut out, c| {
            if SPECIAL.contains(c) {
                out.push('\\');
            }
            out.push(c);
            out
        })
}
2331
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Drop a digest (`@sha256:...`) if present; otherwise drop a version tag
    // (`:1`). A colon only counts as a tag separator when it appears after the
    // last slash — earlier colons belong to the registry host (`host:5000/...`).
    let without_version = match feature_ref.rfind('@') {
        Some(at) => &feature_ref[..at],
        None => {
            let slash = feature_ref.rfind('/');
            match feature_ref.rfind(':') {
                Some(colon) if slash.is_some_and(|s| colon > s) => &feature_ref[..colon],
                _ => feature_ref,
            }
        }
    };
    // The short ID is the final path segment.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2355
2356/// Generates a shell command that looks up a user's passwd entry.
2357///
2358/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2359/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2360fn get_ent_passwd_shell_command(user: &str) -> String {
2361 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2362 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2363 format!(
2364 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2365 shell = escaped_for_shell,
2366 re = escaped_for_regex,
2367 )
2368}
2369
2370/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2371///
2372/// Features listed in the override come first (in the specified order), followed
2373/// by any remaining features sorted lexicographically by their full reference ID.
2374fn resolve_feature_order<'a>(
2375 features: &'a HashMap<String, FeatureOptions>,
2376 override_order: &Option<Vec<String>>,
2377) -> Vec<(&'a String, &'a FeatureOptions)> {
2378 if let Some(order) = override_order {
2379 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2380 for ordered_id in order {
2381 if let Some((key, options)) = features.get_key_value(ordered_id) {
2382 ordered.push((key, options));
2383 }
2384 }
2385 let mut remaining: Vec<_> = features
2386 .iter()
2387 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2388 .collect();
2389 remaining.sort_by_key(|(id, _)| id.as_str());
2390 ordered.extend(remaining);
2391 ordered
2392 } else {
2393 let mut entries: Vec<_> = features.iter().collect();
2394 entries.sort_by_key(|(id, _)| id.as_str());
2395 entries
2396 }
2397}
2398
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `env_variables` is a newline-separated option listing echoed in the script's
/// banner; each non-empty line is indented for readability. All three values
/// are shell-quoted before being embedded, and quoting failures are surfaced
/// as parse errors.
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each option line for the banner; blank lines are dropped.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!("    {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // The wrapper: print a banner, export the feature's env files (`set -a`
    // makes every sourced assignment exported), then run the installer. The
    // EXIT trap reports which feature failed when install.sh exits non-zero.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature       : {escaped_name}'
echo 'Id            : {escaped_id}'
echo 'Options       :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2457
/// Ensures the Dockerfile exposes a build stage named `alias`.
///
/// Picks the target stage (the last `FROM ... AS <build_target>` when a target
/// is given, otherwise the last `FROM` line). If that stage already has a
/// name, appends a new `FROM <name> AS <alias>` stage; otherwise names the
/// stage in place. Returns the content unchanged when no stage matches.
fn dockerfile_inject_alias(
    dockerfile_content: &str,
    alias: &str,
    build_target: Option<String>,
) -> String {
    // Every `FROM` line, paired with its line index in the file.
    let from_lines: Vec<(usize, &str)> = dockerfile_content
        .lines()
        .enumerate()
        .filter(|(_, line)| line.starts_with("FROM"))
        .collect();

    // The stage name of a FROM line, when it ends with `... AS <name>`.
    let stage_alias = |line: &str| -> Option<String> {
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() >= 3 && parts[parts.len() - 2].eq_ignore_ascii_case("as") {
            parts.last().map(|p| p.to_string())
        } else {
            None
        }
    };

    let target_entry = match &build_target {
        Some(target) => from_lines
            .iter()
            .rfind(|(_, line)| stage_alias(line).is_some_and(|a| a.eq_ignore_ascii_case(target))),
        None => from_lines.last(),
    };

    let Some(&(line_idx, from_line)) = target_entry else {
        return dockerfile_content.to_string();
    };

    if let Some(existing_alias) = stage_alias(from_line) {
        // The stage is already named: extend it with a new aliased stage.
        format!("{dockerfile_content}\nFROM {existing_alias} AS {alias}")
    } else {
        // The stage is anonymous: name it in place, preserving a trailing newline.
        let mut result = dockerfile_content
            .lines()
            .enumerate()
            .map(|(i, line)| {
                if i == line_idx {
                    format!("{line} AS {alias}")
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n");
        if dockerfile_content.ends_with('\n') {
            result.push('\n');
        }
        result
    }
}
2517
/// Extracts the base image from the relevant `FROM` line of a Dockerfile.
///
/// When `target` is given, picks the last stage aliased `AS <target>`
/// (case-insensitive); otherwise picks the last `FROM` line. Returns `None`
/// when no matching line (or no image token) exists.
fn image_from_dockerfile(dockerfile_contents: String, target: &Option<String>) -> Option<String> {
    dockerfile_contents
        .lines()
        .filter(|line| line.starts_with("FROM"))
        .rfind(|from_line| match &target {
            Some(target) => {
                // `split_whitespace` tolerates tabs and repeated spaces; the
                // previous `split(' ')` produced empty tokens for those,
                // breaking stage detection and image extraction.
                let parts: Vec<&str> = from_line.split_whitespace().collect();
                parts.len() >= 3
                    && parts
                        .get(parts.len() - 2)
                        .is_some_and(|p| p.eq_ignore_ascii_case("as"))
                    && parts
                        .last()
                        .is_some_and(|p| p.eq_ignore_ascii_case(target))
            }
            None => true,
        })
        .and_then(|from_line| {
            // The image reference is the first token after `FROM`.
            from_line.split_whitespace().nth(1).map(|s| s.to_string())
        })
}
2543
2544fn get_remote_user_from_config(
2545 docker_config: &DockerInspect,
2546 devcontainer: &DevContainerManifest,
2547) -> Result<String, DevContainerError> {
2548 if let DevContainer {
2549 remote_user: Some(user),
2550 ..
2551 } = &devcontainer.dev_container()
2552 {
2553 return Ok(user.clone());
2554 }
2555 if let Some(metadata) = &docker_config.config.labels.metadata {
2556 for metadatum in metadata {
2557 if let Some(remote_user) = metadatum.get("remoteUser") {
2558 if let Some(remote_user_str) = remote_user.as_str() {
2559 return Ok(remote_user_str.to_string());
2560 }
2561 }
2562 }
2563 }
2564 if let Some(image_user) = &docker_config.config.image_user {
2565 if !image_user.is_empty() {
2566 return Ok(image_user.to_string());
2567 }
2568 }
2569 Ok("root".to_string())
2570}
2571
2572// This should come from spec - see the docs
2573fn get_container_user_from_config(
2574 docker_config: &DockerInspect,
2575 devcontainer: &DevContainerManifest,
2576) -> Result<String, DevContainerError> {
2577 if let Some(user) = &devcontainer.dev_container().container_user {
2578 return Ok(user.to_string());
2579 }
2580 if let Some(metadata) = &docker_config.config.labels.metadata {
2581 for metadatum in metadata {
2582 if let Some(container_user) = metadatum.get("containerUser") {
2583 if let Some(container_user_str) = container_user.as_str() {
2584 return Ok(container_user_str.to_string());
2585 }
2586 }
2587 }
2588 }
2589 if let Some(image_user) = &docker_config.config.image_user {
2590 return Ok(image_user.to_string());
2591 }
2592
2593 Ok("root".to_string())
2594}
2595
2596#[cfg(test)]
2597mod test {
2598 use std::{
2599 collections::HashMap,
2600 ffi::OsStr,
2601 path::{Path, PathBuf},
2602 process::{ExitStatus, Output},
2603 sync::{Arc, Mutex},
2604 };
2605
2606 use async_trait::async_trait;
2607 use fs::{FakeFs, Fs};
2608 use gpui::{AppContext, TestAppContext};
2609 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2610 use project::{
2611 ProjectEnvironment,
2612 worktree_store::{WorktreeIdCounter, WorktreeStore},
2613 };
2614 use serde_json_lenient::Value;
2615 use util::{command::Command, paths::SanitizedPath};
2616
2617 #[cfg(not(target_os = "windows"))]
2618 use crate::docker::DockerComposeServicePort;
2619 use crate::{
2620 DevContainerConfig, DevContainerContext,
2621 command_json::CommandRunner,
2622 devcontainer_api::DevContainerError,
2623 devcontainer_json::MountDefinition,
2624 devcontainer_manifest::{
2625 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2626 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2627 image_from_dockerfile, resolve_compose_dockerfile,
2628 },
2629 docker::{
2630 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2631 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2632 DockerPs,
2633 },
2634 oci::TokenResponse,
2635 };
2636 #[cfg(not(target_os = "windows"))]
2637 const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2638 #[cfg(target_os = "windows")]
2639 const TEST_PROJECT_PATH: &str = r#"C:\\path\to\local\project"#;
2640
2641 async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2642 let buffer = futures::io::Cursor::new(Vec::new());
2643 let mut builder = async_tar::Builder::new(buffer);
2644 for (file_name, content) in content {
2645 if content.is_empty() {
2646 let mut header = async_tar::Header::new_gnu();
2647 header.set_size(0);
2648 header.set_mode(0o755);
2649 header.set_entry_type(async_tar::EntryType::Directory);
2650 header.set_cksum();
2651 builder
2652 .append_data(&mut header, file_name, &[] as &[u8])
2653 .await
2654 .unwrap();
2655 } else {
2656 let data = content.as_bytes();
2657 let mut header = async_tar::Header::new_gnu();
2658 header.set_size(data.len() as u64);
2659 header.set_mode(0o755);
2660 header.set_entry_type(async_tar::EntryType::Regular);
2661 header.set_cksum();
2662 builder
2663 .append_data(&mut header, file_name, data)
2664 .await
2665 .unwrap();
2666 }
2667 }
2668 let buffer = builder.into_inner().await.unwrap();
2669 buffer.into_inner()
2670 }
2671
2672 fn test_project_filename() -> String {
2673 PathBuf::from(TEST_PROJECT_PATH)
2674 .file_name()
2675 .expect("is valid")
2676 .display()
2677 .to_string()
2678 }
2679
2680 async fn init_devcontainer_config(
2681 fs: &Arc<FakeFs>,
2682 devcontainer_contents: &str,
2683 ) -> DevContainerConfig {
2684 fs.insert_tree(
2685 format!("{TEST_PROJECT_PATH}/.devcontainer"),
2686 serde_json::json!({"devcontainer.json": devcontainer_contents}),
2687 )
2688 .await;
2689
2690 DevContainerConfig::default_config()
2691 }
2692
    /// Handles to the fake collaborators backing a manifest under test, so
    /// individual tests can inspect or stub their behavior.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Held to keep the client alive; not read by tests directly.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2699
2700 async fn init_default_devcontainer_manifest(
2701 cx: &mut TestAppContext,
2702 devcontainer_contents: &str,
2703 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2704 let fs = FakeFs::new(cx.executor());
2705 let http_client = fake_http_client();
2706 let command_runner = Arc::new(TestCommandRunner::new());
2707 let docker = Arc::new(FakeDocker::new());
2708 let environment = HashMap::new();
2709
2710 init_devcontainer_manifest(
2711 cx,
2712 fs,
2713 http_client,
2714 docker,
2715 command_runner,
2716 environment,
2717 devcontainer_contents,
2718 )
2719 .await
2720 }
2721
    /// Builds a `DevContainerManifest` wired to the given fake collaborators,
    /// returning both the manifest and handles to the fakes so tests can
    /// assert on recorded interactions.
    async fn init_devcontainer_manifest(
        cx: &mut TestAppContext,
        fs: Arc<FakeFs>,
        http_client: Arc<dyn HttpClient>,
        docker_client: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
        environment: HashMap<String, String>,
        devcontainer_contents: &str,
    ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
        // Stage devcontainer.json in the fake filesystem before constructing
        // the manifest, which reads it during `new`.
        let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
        let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
        let worktree_store =
            cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
        let project_environment =
            cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));

        let context = DevContainerContext {
            project_directory: SanitizedPath::cast_arc(project_path),
            use_podman: false,
            fs: fs.clone(),
            http_client: http_client.clone(),
            environment: project_environment.downgrade(),
        };

        // Keep clones of the fakes so the caller can inspect them afterwards.
        let test_dependencies = TestDependencies {
            fs: fs.clone(),
            _http_client: http_client.clone(),
            docker: docker_client.clone(),
            command_runner: command_runner.clone(),
        };
        let manifest = DevContainerManifest::new(
            &context,
            environment,
            docker_client,
            command_runner,
            local_config,
            &PathBuf::from(TEST_PROJECT_PATH),
        )
        .await?;

        Ok((test_dependencies, manifest))
    }
2764
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        // devcontainer.json sets an explicit remoteUser, which should win over
        // any user advertised by the image metadata labels.
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
            "#,
        )
        .await
        .unwrap();

        // Image metadata advertises a different user ("vsCode")...
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        // ...but the config's explicit remoteUser takes precedence.
        assert_eq!(remote_user, "root".to_string())
    }
2804
    #[gpui::test]
    async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
        // Empty devcontainer.json: no explicit remoteUser, so resolution falls
        // through to the image metadata labels.
        let (_, devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);

        // The metadata label's user is used.
        assert!(remote_user.is_ok());
        let remote_user = remote_user.expect("ok");
        assert_eq!(&remote_user, "vsCode")
    }
2833
2834 #[test]
2835 fn should_extract_feature_id_from_references() {
2836 assert_eq!(
2837 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2838 "aws-cli"
2839 );
2840 assert_eq!(
2841 extract_feature_id("ghcr.io/devcontainers/features/go"),
2842 "go"
2843 );
2844 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2845 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2846 assert_eq!(
2847 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2848 "rust"
2849 );
2850 }
2851
    #[gpui::test]
    async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );

        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"{
            "name": "TODO"
        }"#,
        )
        .await
        .unwrap();
        let build_resources = DockerBuildResources {
            image: DockerInspect {
                id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
                config: DockerInspectConfig {
                    labels: DockerConfigLabels { metadata: None },
                    image_user: None,
                    env: Vec::new(),
                },
                mounts: None,
                state: None,
            },
            additional_mounts: vec![],
            privileged: false,
            entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
        };
        let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);

        assert!(docker_run_command.is_ok());
        let docker_run_command = docker_run_command.expect("ok");

        // Expect the labels that later identify this container (local_folder /
        // config_file), the workspace bind mount, and the keep-alive entrypoint.
        assert_eq!(docker_run_command.get_program(), "docker");
        let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("devcontainer.json");
        let expected_config_file_label = expected_config_file_label.display();
        assert_eq!(
            docker_run_command.get_args().collect::<Vec<&OsStr>>(),
            vec![
                OsStr::new("run"),
                OsStr::new("--sig-proxy=false"),
                OsStr::new("-d"),
                OsStr::new("--mount"),
                OsStr::new(&format!(
                    "type=bind,source={TEST_PROJECT_PATH},target=/workspaces/project,consistency=cached"
                )),
                OsStr::new("-l"),
                OsStr::new(&format!("devcontainer.local_folder={TEST_PROJECT_PATH}")),
                OsStr::new("-l"),
                OsStr::new(&format!(
                    "devcontainer.config_file={expected_config_file_label}"
                )),
                OsStr::new("--entrypoint"),
                OsStr::new("/bin/sh"),
                OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
                OsStr::new("-c"),
                OsStr::new(
                    "
                    echo Container started
                    trap \"exit 0\" 15
                    exec \"$@\"
                    while sleep 1 & wait $!; do :; done
                    "
                    .trim()
                ),
                OsStr::new("-"),
            ]
        )
    }
2926
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        // The happy path returns the service name along with its definition.
        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2986
2987 #[gpui::test]
2988 async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
2989 let fs = FakeFs::new(cx.executor());
2990 let given_devcontainer_contents = r#"
2991// These are some external comments. serde_lenient should handle them
2992{
2993 // These are some internal comments
2994 "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
2995 "name": "myDevContainer-${devcontainerId}",
2996 "remoteUser": "root",
2997 "remoteEnv": {
2998 "DEVCONTAINER_ID": "${devcontainerId}",
2999 "MYVAR2": "myvarothervalue",
3000 "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
3001 "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
3002 "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
3003 "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
3004 "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
3005 "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}",
3006 "LOCAL_ENV_VAR_3": "before-${localEnv:missing_local_env}-after",
3007 "LOCAL_ENV_VAR_4": "${localEnv:with_defaults:default}"
3008
3009 }
3010}
3011 "#;
3012 let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
3013 cx,
3014 fs,
3015 fake_http_client(),
3016 Arc::new(FakeDocker::new()),
3017 Arc::new(TestCommandRunner::new()),
3018 HashMap::from([
3019 ("local_env_1".to_string(), "local_env_value1".to_string()),
3020 ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
3021 ]),
3022 given_devcontainer_contents,
3023 )
3024 .await
3025 .unwrap();
3026
3027 devcontainer_manifest.parse_nonremote_vars().unwrap();
3028
3029 let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
3030 &devcontainer_manifest.config
3031 else {
3032 panic!("Config not parsed");
3033 };
3034
3035 // ${devcontainerId}
3036 let devcontainer_id = devcontainer_manifest.devcontainer_id();
3037 assert_eq!(
3038 variable_replaced_devcontainer.name,
3039 Some(format!("myDevContainer-{devcontainer_id}"))
3040 );
3041 assert_eq!(
3042 variable_replaced_devcontainer
3043 .remote_env
3044 .as_ref()
3045 .and_then(|env| env.get("DEVCONTAINER_ID")),
3046 Some(&devcontainer_id)
3047 );
3048
3049 // ${containerWorkspaceFolderBasename}
3050 assert_eq!(
3051 variable_replaced_devcontainer
3052 .remote_env
3053 .as_ref()
3054 .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
3055 Some(&test_project_filename())
3056 );
3057
3058 // ${localWorkspaceFolderBasename}
3059 assert_eq!(
3060 variable_replaced_devcontainer
3061 .remote_env
3062 .as_ref()
3063 .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
3064 Some(&test_project_filename())
3065 );
3066
3067 // ${containerWorkspaceFolder}
3068 assert_eq!(
3069 variable_replaced_devcontainer
3070 .remote_env
3071 .as_ref()
3072 .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
3073 Some(&format!("/workspaces/{}", test_project_filename()))
3074 );
3075
3076 // ${localWorkspaceFolder}
3077 assert_eq!(
3078 variable_replaced_devcontainer
3079 .remote_env
3080 .as_ref()
3081 .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
3082 // We replace backslashes with forward slashes during variable replacement for JSON safety
3083 Some(&TEST_PROJECT_PATH.replace("\\", "/"))
3084 );
3085
3086 // ${localEnv:VARIABLE_NAME}
3087 assert_eq!(
3088 variable_replaced_devcontainer
3089 .remote_env
3090 .as_ref()
3091 .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
3092 Some(&"local_env_value1".to_string())
3093 );
3094 assert_eq!(
3095 variable_replaced_devcontainer
3096 .remote_env
3097 .as_ref()
3098 .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
3099 Some(&"THISVALUEHERE".to_string())
3100 );
3101 assert_eq!(
3102 variable_replaced_devcontainer
3103 .remote_env
3104 .as_ref()
3105 .and_then(|env| env.get("LOCAL_ENV_VAR_3")),
3106 Some(&"before--after".to_string())
3107 );
3108 assert_eq!(
3109 variable_replaced_devcontainer
3110 .remote_env
3111 .as_ref()
3112 .and_then(|env| env.get("LOCAL_ENV_VAR_4")),
3113 Some(&"default".to_string())
3114 );
3115 }
3116
3117 #[test]
3118 fn test_replace_environment_variables() {
3119 let replaced = DevContainerManifest::replace_environment_variables(
3120 "before ${containerEnv:FOUND} middle ${containerEnv:MISSING:default-value} after${containerEnv:MISSING2}",
3121 "containerEnv",
3122 &HashMap::from([("FOUND".to_string(), "value".to_string())]),
3123 );
3124
3125 assert_eq!(replaced, "before value middle default-value after");
3126 }
3127
3128 #[test]
3129 fn test_replace_environment_variables_supports_defaults_with_colons() {
3130 let replaced = DevContainerManifest::replace_environment_variables(
3131 "before ${containerEnv:MISSING:one:two} after",
3132 "containerEnv",
3133 &HashMap::new(),
3134 );
3135
3136 assert_eq!(replaced, "before one:two after");
3137 }
3138
3139 #[gpui::test]
3140 async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
3141 let given_devcontainer_contents = r#"
3142 // These are some external comments. serde_lenient should handle them
3143 {
3144 // These are some internal comments
3145 "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
3146 "name": "myDevContainer-${devcontainerId}",
3147 "remoteUser": "root",
3148 "remoteEnv": {
3149 "DEVCONTAINER_ID": "${devcontainerId}",
3150 "MYVAR2": "myvarothervalue",
3151 "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
3152 "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
3153 "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
3154 "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"
3155
3156 },
3157 "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
3158 "workspaceFolder": "/workspace/customfolder"
3159 }
3160 "#;
3161
3162 let (_, mut devcontainer_manifest) =
3163 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3164 .await
3165 .unwrap();
3166
3167 devcontainer_manifest.parse_nonremote_vars().unwrap();
3168
3169 let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
3170 &devcontainer_manifest.config
3171 else {
3172 panic!("Config not parsed");
3173 };
3174
3175 // ${devcontainerId}
3176 let devcontainer_id = devcontainer_manifest.devcontainer_id();
3177 assert_eq!(
3178 variable_replaced_devcontainer.name,
3179 Some(format!("myDevContainer-{devcontainer_id}"))
3180 );
3181 assert_eq!(
3182 variable_replaced_devcontainer
3183 .remote_env
3184 .as_ref()
3185 .and_then(|env| env.get("DEVCONTAINER_ID")),
3186 Some(&devcontainer_id)
3187 );
3188
3189 // ${containerWorkspaceFolderBasename}
3190 assert_eq!(
3191 variable_replaced_devcontainer
3192 .remote_env
3193 .as_ref()
3194 .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
3195 Some(&"customfolder".to_string())
3196 );
3197
3198 // ${localWorkspaceFolderBasename}
3199 assert_eq!(
3200 variable_replaced_devcontainer
3201 .remote_env
3202 .as_ref()
3203 .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
3204 Some(&"project".to_string())
3205 );
3206
3207 // ${containerWorkspaceFolder}
3208 assert_eq!(
3209 variable_replaced_devcontainer
3210 .remote_env
3211 .as_ref()
3212 .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
3213 Some(&"/workspace/customfolder".to_string())
3214 );
3215
3216 // ${localWorkspaceFolder}
3217 assert_eq!(
3218 variable_replaced_devcontainer
3219 .remote_env
3220 .as_ref()
3221 .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
3222 // We replace backslashes with forward slashes during variable replacement for JSON safety
3223 Some(&TEST_PROJECT_PATH.replace("\\", "/"))
3224 );
3225 }
3226
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
        // End-to-end: a Dockerfile-based config with two OCI features should
        // produce (a) the extended feature Dockerfile, (b) the updateUID
        // Dockerfile, (c) per-feature install wrapper scripts, and (d) a
        // `docker run` invocation with mounts, labels, and ports in order.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture config: Dockerfile build with args, explicit workspace
        // mount, extra mounts/runArgs/ports, every lifecycle command, two
        // features, and per-editor customizations (only "zed" should surface).
        let given_devcontainer_contents = r#"
/*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
{
    "name": "cli-${devcontainerId}",
    // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
    "build": {
        "dockerfile": "Dockerfile",
        "args": {
            "VARIANT": "18-bookworm",
            "FOO": "bar",
        },
    },
    "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
    "workspaceFolder": "/workspace2",
    "mounts": [
        // Keep command history across instances
        "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
    ],

    "runArgs": [
        "--cap-add=SYS_PTRACE",
        "--sig-proxy=true",
    ],

    "forwardPorts": [
        8082,
        8083,
    ],
    "appPort": [
        8084,
        "8085:8086",
    ],

    "containerEnv": {
        "VARIABLE_VALUE": "value",
    },

    "initializeCommand": "touch IAM.md",

    "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

    "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

    "postCreateCommand": {
        "yarn": "yarn install",
        "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
    },

    "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

    "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

    "remoteUser": "node",

    "remoteEnv": {
        "PATH": "${containerEnv:PATH}:/some/other/path",
        "OTHER_ENV": "other_env_value"
    },

    "features": {
        "ghcr.io/devcontainers/features/docker-in-docker:2": {
            "moby": false,
        },
        "ghcr.io/devcontainers/features/go:1": {},
    },

    "customizations": {
        "vscode": {
            "extensions": [
                "dbaeumer.vscode-eslint",
                "GitHub.vscode-pull-request-github",
            ],
        },
        "zed": {
            "extensions": ["vue", "ruby"],
        },
        "codespaces": {
            "repositories": {
                "devcontainers/features": {
                    "permissions": {
                        "contents": "write",
                        "workflows": "write",
                    },
                },
            },
        },
    },
}
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the Dockerfile that the config's "build.dockerfile" points at.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // Build the image, apply features, and start the container.
        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the "zed" customization's extensions surface in the result.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // The extended Dockerfile keeps the user's stages verbatim and appends
        // the feature-normalize/target stages plus one RUN per feature.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The updateUID Dockerfile rewrites the remote user's UID/GID and ends
        // with the container ENV lines contributed by the features + config.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // Each feature gets an install wrapper that sources the builtin env
        // files and then runs the feature's install.sh.
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("/go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
	[ $? -eq 0 ] && exit
	echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo ' GOLANGCILINTVERSION=latest
 VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // The final `docker run` carries runArgs, all mounts (workspace,
        // config mounts, feature-added volumes), metadata labels, forwarded
        // and app ports, and the keep-alive entrypoint — in this exact order.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
            .expect("found");

        assert_eq!(
            docker_run_command.args,
            vec![
                "run".to_string(),
                "--privileged".to_string(),
                "--cap-add=SYS_PTRACE".to_string(),
                "--sig-proxy=true".to_string(),
                "-d".to_string(),
                "--mount".to_string(),
                "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
                "--mount".to_string(),
                "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
                "-l".to_string(),
                "devcontainer.local_folder=/path/to/local/project".to_string(),
                "-l".to_string(),
                "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
                "-l".to_string(),
                "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
                "-p".to_string(),
                "8082:8082".to_string(),
                "-p".to_string(),
                "8083:8083".to_string(),
                "-p".to_string(),
                "8084:8084".to_string(),
                "-p".to_string(),
                "8085:8086".to_string(),
                "--entrypoint".to_string(),
                "/bin/sh".to_string(),
                "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
                "-c".to_string(),
                "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                "-".to_string()
            ]
        );

        // Every exec into the container gets the substituted remoteEnv:
        // ${containerEnv:PATH} resolved against the image's recorded PATH.
        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
3601
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end for the docker-compose path: features are baked into an
        // extended Dockerfile, and compose build/runtime override files are
        // written with labels, entrypoint, feature volumes, and forwarded
        // ports placed on the correct services.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Fixture: compose-based config whose "app" service shares the "db"
        // service's network, so forwarded "db:" ports end up on "db".
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by "dockerComposeFile".
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
        "#.trim().to_string(),
            )
            .await
            .unwrap();

        // Write the Dockerfile the "app" service builds from.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The extended Dockerfile keeps the service's stages and appends one
        // feature-install RUN per feature, in declaration order.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // The updateUID Dockerfile for the compose path ends with only the
        // feature-contributed ENV (no containerEnv in this fixture).
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The build override must keep the original service's build context
        // (regression check for losing "context: ." from docker-compose.yml).
        let build_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_build.json")
            })
            .expect("to be found");
        let build_override = test_dependencies.fs.load(build_override).await.unwrap();
        let build_config: DockerComposeConfig =
            serde_json_lenient::from_str(&build_override).unwrap();
        let build_context = build_config
            .services
            .get("app")
            .and_then(|s| s.build.as_ref())
            .and_then(|b| b.context.clone())
            .expect("build override should have a context");
        assert_eq!(
            build_context, ".",
            "build override should preserve the original build context from docker-compose.yml"
        );

        // The runtime override adds entrypoint/labels/feature volumes on
        // "app" and the forwarded ports on "db" (the network owner).
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(HashMap::from([
                            ("devcontainer.metadata".to_string(), "[{\"remoteUser\":\"vscode\"}]".to_string()),
                            ("devcontainer.local_folder".to_string(), "/path/to/local/project".to_string()),
                            ("devcontainer.config_file".to_string(), "/path/to/local/project/.devcontainer/devcontainer.json".to_string())
                        ])),
                        volumes: vec![
                            MountDefinition {
                                source: Some("dind-var-lib-docker-42dad4b4ca7b8ced".to_string()),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            DockerComposeServicePort {
                                target: "8083".to_string(),
                                published: "8083".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "5432".to_string(),
                                published: "5432".to_string(),
                                ..Default::default()
                            },
                            DockerComposeServicePort {
                                target: "1234".to_string(),
                                published: "1234".to_string(),
                                ..Default::default()
                            },
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3922
3923 #[test]
3924 fn test_resolve_compose_dockerfile() {
3925 let compose = Path::new("/project/.devcontainer/docker-compose.yml");
3926
3927 // Bug case (#53473): context ".." with relative dockerfile
3928 assert_eq!(
3929 resolve_compose_dockerfile(compose, Some(".."), ".devcontainer/Dockerfile"),
3930 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3931 );
3932
3933 // Compose path containing ".." (as docker_compose_manifest() produces)
3934 assert_eq!(
3935 resolve_compose_dockerfile(
3936 Path::new("/project/.devcontainer/../docker-compose.yml"),
3937 Some("."),
3938 "docker/Dockerfile",
3939 ),
3940 Some(PathBuf::from("/project/docker/Dockerfile")),
3941 );
3942
3943 // Absolute dockerfile returned as-is
3944 assert_eq!(
3945 resolve_compose_dockerfile(compose, Some("."), "/absolute/Dockerfile"),
3946 Some(PathBuf::from("/absolute/Dockerfile")),
3947 );
3948
3949 // Absolute context used directly
3950 assert_eq!(
3951 resolve_compose_dockerfile(compose, Some("/abs/context"), "Dockerfile"),
3952 Some(PathBuf::from("/abs/context/Dockerfile")),
3953 );
3954
3955 // No context defaults to compose file's directory
3956 assert_eq!(
3957 resolve_compose_dockerfile(compose, None, "Dockerfile"),
3958 Some(PathBuf::from("/project/.devcontainer/Dockerfile")),
3959 );
3960 }
3961
    /// Companion to `test_resolve_compose_dockerfile`: verifies end-to-end that
    /// `dockerfile_location()` resolves the Dockerfile inside `.devcontainer`
    /// when the compose service's build context points at the parent directory.
    /// NOTE(review): the referenced `docker-compose-context-parent.yml` is not
    /// written here, so it presumably comes from the fixtures set up by
    /// `init_default_devcontainer_manifest` — confirm against that helper.
    #[gpui::test]
    async fn test_dockerfile_location_with_compose_context_parent(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();

        let given_devcontainer_contents = r#"
        {
            "name": "Test",
            "dockerComposeFile": "docker-compose-context-parent.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
        }
        "#;
        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Variable substitution (e.g. ${localWorkspaceFolderBasename}) must run
        // before any path resolution.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        // The Dockerfile must be located inside .devcontainer even though the
        // compose build context is the project root.
        let expected = PathBuf::from(TEST_PROJECT_PATH)
            .join(".devcontainer")
            .join("Dockerfile");
        assert_eq!(
            devcontainer_manifest.dockerfile_location().await,
            Some(expected)
        );
    }
3990
    /// Full build-and-run of a compose-based devcontainer (rust-postgres
    /// template) with `"updateRemoteUserUID": false`. Asserts the generated
    /// `Dockerfile.extended` (base image stage + aws-cli and docker-in-docker
    /// feature-install stages). Because UID update is disabled, no
    /// `updateUID.Dockerfile` is asserted here — contrast with the podman test.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "updateRemoteUserUID": false,
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file the config references into the fake fs.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // ...and the Dockerfile the compose service builds from.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The build step should have produced a feature-install Dockerfile
        // ("Dockerfile.extended") somewhere on the fake fs.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Expected: user Dockerfile relabelled as the auto-added stage, then
        // one buildkit RUN --mount stage per feature, in declaration order.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4166
    /// Same compose-based devcontainer as the test above, but with a fake
    /// docker client reporting podman. The expected `Dockerfile.extended`
    /// differs from the buildkit path: feature content is staged via a
    /// `dev_container_feature_content_temp` image and plain `COPY --chown`
    /// instructions instead of `RUN --mount=type=bind`. Since
    /// `updateRemoteUserUID` is not disabled here, an `updateUID.Dockerfile`
    /// must also be generated and is asserted in full.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            // "forwardPorts": [5432],

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        // Explicit (non-default) init so the fake docker client can be put
        // into podman mode before the manifest is created.
        let mut fake_docker = FakeDocker::new();
        fake_docker.set_podman(true);
        let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            FakeFs::new(cx.executor()),
            fake_http_client(),
            Arc::new(fake_docker),
            Arc::new(TestCommandRunner::new()),
            HashMap::new(),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        // Write the compose file the config references into the fake fs.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // ...and the Dockerfile the compose service builds from.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
            "#.trim().to_string()).await.unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        let files = test_dependencies.fs.files();

        // Podman variant of the generated feature-install Dockerfile: COPY-based
        // feature staging, no buildkit mounts, no DOCKER_BUILDKIT env.
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM dev_container_feature_content_temp as dev_containers_feature_content_source

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh

COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // With UID updating enabled (the default), a dedicated Dockerfile that
        // rewrites the remote user's UID/GID must also be emitted.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
4392
4393 #[gpui::test]
4394 async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
4395 cx.executor().allow_parking();
4396 env_logger::try_init().ok();
4397 let given_devcontainer_contents = r#"
4398 /*---------------------------------------------------------------------------------------------
4399 * Copyright (c) Microsoft Corporation. All rights reserved.
4400 * Licensed under the MIT License. See License.txt in the project root for license information.
4401 *--------------------------------------------------------------------------------------------*/
4402 {
4403 "name": "cli-${devcontainerId}",
4404 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
4405 "build": {
4406 "dockerfile": "Dockerfile",
4407 "args": {
4408 "VARIANT": "18-bookworm",
4409 "FOO": "bar",
4410 },
4411 "target": "development",
4412 },
4413 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
4414 "workspaceFolder": "/workspace2",
4415 "mounts": [
4416 // Keep command history across instances
4417 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
4418 ],
4419
4420 "forwardPorts": [
4421 8082,
4422 8083,
4423 ],
4424 "appPort": "8084",
4425 "updateRemoteUserUID": false,
4426
4427 "containerEnv": {
4428 "VARIABLE_VALUE": "value",
4429 },
4430
4431 "initializeCommand": "touch IAM.md",
4432
4433 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
4434
4435 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
4436
4437 "postCreateCommand": {
4438 "yarn": "yarn install",
4439 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4440 },
4441
4442 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
4443
4444 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
4445
4446 "remoteUser": "node",
4447
4448 "remoteEnv": {
4449 "PATH": "${containerEnv:PATH}:/some/other/path",
4450 "OTHER_ENV": "other_env_value"
4451 },
4452
4453 "features": {
4454 "ghcr.io/devcontainers/features/docker-in-docker:2": {
4455 "moby": false,
4456 },
4457 "ghcr.io/devcontainers/features/go:1": {},
4458 },
4459
4460 "customizations": {
4461 "vscode": {
4462 "extensions": [
4463 "dbaeumer.vscode-eslint",
4464 "GitHub.vscode-pull-request-github",
4465 ],
4466 },
4467 "zed": {
4468 "extensions": ["vue", "ruby"],
4469 },
4470 "codespaces": {
4471 "repositories": {
4472 "devcontainers/features": {
4473 "permissions": {
4474 "contents": "write",
4475 "workflows": "write",
4476 },
4477 },
4478 },
4479 },
4480 },
4481 }
4482 "#;
4483
4484 let (test_dependencies, mut devcontainer_manifest) =
4485 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4486 .await
4487 .unwrap();
4488
4489 test_dependencies
4490 .fs
4491 .atomic_write(
4492 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4493 r#"
4494# Copyright (c) Microsoft Corporation. All rights reserved.
4495# Licensed under the MIT License. See License.txt in the project root for license information.
4496ARG VARIANT="16-bullseye"
4497FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4498FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4499
4500RUN mkdir -p /workspaces && chown node:node /workspaces
4501
4502ARG USERNAME=node
4503USER $USERNAME
4504
4505# Save command line history
4506RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4507&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4508&& mkdir -p /home/$USERNAME/commandhistory \
4509&& touch /home/$USERNAME/commandhistory/.bash_history \
4510&& chown -R $USERNAME /home/$USERNAME/commandhistory
4511 "#.trim().to_string(),
4512 )
4513 .await
4514 .unwrap();
4515
4516 devcontainer_manifest.parse_nonremote_vars().unwrap();
4517
4518 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4519
4520 assert_eq!(
4521 devcontainer_up.extension_ids,
4522 vec!["vue".to_string(), "ruby".to_string()]
4523 );
4524
4525 let files = test_dependencies.fs.files();
4526 let feature_dockerfile = files
4527 .iter()
4528 .find(|f| {
4529 f.file_name()
4530 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
4531 })
4532 .expect("to be found");
4533 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
4534 assert_eq!(
4535 &feature_dockerfile,
4536 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
4537
4538# Copyright (c) Microsoft Corporation. All rights reserved.
4539# Licensed under the MIT License. See License.txt in the project root for license information.
4540ARG VARIANT="16-bullseye"
4541FROM mcr.microsoft.com/devcontainers/typescript-node:latest as predev
4542FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} as development
4543
4544RUN mkdir -p /workspaces && chown node:node /workspaces
4545
4546ARG USERNAME=node
4547USER $USERNAME
4548
4549# Save command line history
4550RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
4551&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
4552&& mkdir -p /home/$USERNAME/commandhistory \
4553&& touch /home/$USERNAME/commandhistory/.bash_history \
4554&& chown -R $USERNAME /home/$USERNAME/commandhistory
4555FROM development AS dev_container_auto_added_stage_label
4556
4557FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
4558USER root
4559COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
4560RUN chmod -R 0755 /tmp/build-features/
4561
4562FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
4563
4564USER root
4565
4566RUN mkdir -p /tmp/dev-container-features
4567COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
4568
4569RUN \
4570echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
4571echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
4572
4573
4574RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
4575cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
4576&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
4577&& cd /tmp/dev-container-features/docker-in-docker_0 \
4578&& chmod +x ./devcontainer-features-install.sh \
4579&& ./devcontainer-features-install.sh \
4580&& rm -rf /tmp/dev-container-features/docker-in-docker_0
4581
4582RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
4583cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
4584&& chmod -R 0755 /tmp/dev-container-features/go_1 \
4585&& cd /tmp/dev-container-features/go_1 \
4586&& chmod +x ./devcontainer-features-install.sh \
4587&& ./devcontainer-features-install.sh \
4588&& rm -rf /tmp/dev-container-features/go_1
4589
4590
4591ARG _DEV_CONTAINERS_IMAGE_USER=root
4592USER $_DEV_CONTAINERS_IMAGE_USER
4593
4594# Ensure that /etc/profile does not clobber the existing path
4595RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
4596
4597ENV DOCKER_BUILDKIT=1
4598
4599ENV GOPATH=/go
4600ENV GOROOT=/usr/local/go
4601ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
4602ENV VARIABLE_VALUE=value
4603"#
4604 );
4605
4606 let golang_install_wrapper = files
4607 .iter()
4608 .find(|f| {
4609 f.file_name()
4610 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
4611 && f.to_str().is_some_and(|s| s.contains("go_"))
4612 })
4613 .expect("to be found");
4614 let golang_install_wrapper = test_dependencies
4615 .fs
4616 .load(golang_install_wrapper)
4617 .await
4618 .unwrap();
4619 assert_eq!(
4620 &golang_install_wrapper,
4621 r#"#!/bin/sh
4622set -e
4623
4624on_exit () {
4625 [ $? -eq 0 ] && exit
4626 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
4627}
4628
4629trap on_exit EXIT
4630
4631echo ===========================================================================
4632echo 'Feature : go'
4633echo 'Id : ghcr.io/devcontainers/features/go:1'
4634echo 'Options :'
4635echo ' GOLANGCILINTVERSION=latest
4636 VERSION=latest'
4637echo ===========================================================================
4638
4639set -a
4640. ../devcontainer-features.builtin.env
4641. ./devcontainer-features.env
4642set +a
4643
4644chmod +x ./install.sh
4645./install.sh
4646"#
4647 );
4648
4649 let docker_commands = test_dependencies
4650 .command_runner
4651 .commands_by_program("docker");
4652
4653 let docker_run_command = docker_commands
4654 .iter()
4655 .find(|c| c.args.get(0).is_some_and(|a| a == "run"));
4656
4657 assert!(docker_run_command.is_some());
4658
4659 let docker_exec_commands = test_dependencies
4660 .docker
4661 .exec_commands_recorded
4662 .lock()
4663 .unwrap();
4664
4665 assert!(docker_exec_commands.iter().all(|exec| {
4666 exec.env
4667 == HashMap::from([
4668 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
4669 (
4670 "PATH".to_string(),
4671 "/initial/path:/some/other/path".to_string(),
4672 ),
4673 ])
4674 }))
4675 }
4676
    /// Build-and-run of the simplest configuration: a prebuilt image, no
    /// Dockerfile, compose file, or features. With `updateRemoteUserUID` left
    /// at its default, the only generated build artifact asserted is the
    /// `updateUID.Dockerfile` that rewrites the remote user's UID/GID.
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "image": "test_image:latest",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The UID-update Dockerfile must have been written to the fake fs.
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4751
4752 #[cfg(target_os = "windows")]
4753 #[gpui::test]
4754 async fn test_spawns_devcontainer_with_plain_image(cx: &mut TestAppContext) {
4755 cx.executor().allow_parking();
4756 env_logger::try_init().ok();
4757 let given_devcontainer_contents = r#"
4758 {
4759 "name": "cli-${devcontainerId}",
4760 "image": "test_image:latest",
4761 }
4762 "#;
4763
4764 let (_, mut devcontainer_manifest) =
4765 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4766 .await
4767 .unwrap();
4768
4769 devcontainer_manifest.parse_nonremote_vars().unwrap();
4770
4771 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
4772
4773 assert_eq!(
4774 devcontainer_up.remote_workspace_folder,
4775 "/workspaces/project"
4776 );
4777 }
4778
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_plain_image(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // Compose-based config: the container comes from a compose service
        // rather than an `image`/`build` entry in the devcontainer json.
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "dockerComposeFile": "docker-compose-plain.yml",
            "service": "app",
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file the config references; the fake docker client
        // returns a matching parsed config for this exact path (see FakeDocker).
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose-plain.yml"),
                r#"
services:
  app:
    image: test_image:latest
    command: sleep infinity
    volumes:
      - ..:/workspace:cached
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // build_and_run is expected to have written an updateUID.Dockerfile
        // (used to remap the remote user's UID/GID to the host user's).
        let files = test_dependencies.fs.files();
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        // The generated Dockerfile must match this template byte-for-byte.
        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
"#
        );
    }
4872
4873 #[gpui::test]
4874 async fn test_gets_base_image_from_dockerfile(cx: &mut TestAppContext) {
4875 cx.executor().allow_parking();
4876 env_logger::try_init().ok();
4877 let given_devcontainer_contents = r#"
4878 {
4879 "name": "cli-${devcontainerId}",
4880 "build": {
4881 "dockerfile": "Dockerfile",
4882 "args": {
4883 "VERSION": "1.22",
4884 }
4885 },
4886 }
4887 "#;
4888
4889 let (test_dependencies, mut devcontainer_manifest) =
4890 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4891 .await
4892 .unwrap();
4893
4894 test_dependencies
4895 .fs
4896 .atomic_write(
4897 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4898 r#"
4899FROM dontgrabme as build_context
4900ARG VERSION=1.21
4901ARG REPOSITORY=mybuild
4902ARG REGISTRY=docker.io/stuff
4903
4904ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4905
4906FROM ${IMAGE} AS devcontainer
4907 "#
4908 .trim()
4909 .to_string(),
4910 )
4911 .await
4912 .unwrap();
4913
4914 devcontainer_manifest.parse_nonremote_vars().unwrap();
4915
4916 let dockerfile_contents = devcontainer_manifest
4917 .expanded_dockerfile_content()
4918 .await
4919 .unwrap();
4920 let base_image = image_from_dockerfile(
4921 dockerfile_contents,
4922 &devcontainer_manifest
4923 .dev_container()
4924 .build
4925 .as_ref()
4926 .and_then(|b| b.target.clone()),
4927 )
4928 .unwrap();
4929
4930 assert_eq!(base_image, "docker.io/stuff/mybuild:1.22".to_string());
4931 }
4932
4933 #[gpui::test]
4934 async fn test_gets_base_image_from_dockerfile_with_target_specified(cx: &mut TestAppContext) {
4935 cx.executor().allow_parking();
4936 env_logger::try_init().ok();
4937 let given_devcontainer_contents = r#"
4938 {
4939 "name": "cli-${devcontainerId}",
4940 "build": {
4941 "dockerfile": "Dockerfile",
4942 "args": {
4943 "VERSION": "1.22",
4944 },
4945 "target": "development"
4946 },
4947 }
4948 "#;
4949
4950 let (test_dependencies, mut devcontainer_manifest) =
4951 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
4952 .await
4953 .unwrap();
4954
4955 test_dependencies
4956 .fs
4957 .atomic_write(
4958 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
4959 r#"
4960FROM dontgrabme as build_context
4961ARG VERSION=1.21
4962ARG REPOSITORY=mybuild
4963ARG REGISTRY=docker.io/stuff
4964
4965ARG IMAGE=${REGISTRY}/${REPOSITORY}:${VERSION}
4966ARG DEV_IMAGE=${REGISTRY}/${REPOSITORY}:latest
4967
4968FROM ${DEV_IMAGE} AS development
4969FROM ${IMAGE} AS production
4970 "#
4971 .trim()
4972 .to_string(),
4973 )
4974 .await
4975 .unwrap();
4976
4977 devcontainer_manifest.parse_nonremote_vars().unwrap();
4978
4979 let dockerfile_contents = devcontainer_manifest
4980 .expanded_dockerfile_content()
4981 .await
4982 .unwrap();
4983 let base_image = image_from_dockerfile(
4984 dockerfile_contents,
4985 &devcontainer_manifest
4986 .dev_container()
4987 .build
4988 .as_ref()
4989 .and_then(|b| b.target.clone()),
4990 )
4991 .unwrap();
4992
4993 assert_eq!(base_image, "docker.io/stuff/mybuild:latest".to_string());
4994 }
4995
    #[gpui::test]
    async fn test_expands_args_in_dockerfile(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // `build.args` supplied here should be substituted into ${…} usages
        // when the Dockerfile is expanded.
        let given_devcontainer_contents = r#"
        {
            "name": "cli-${devcontainerId}",
            "build": {
                "dockerfile": "Dockerfile",
                "args": {
                    "JSON_ARG": "some-value",
                    "ELIXIR_VERSION": "1.21",
                }
            },
        }
        "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Dockerfile exercising ARG-expansion corner cases: a forward
        // reference (left untouched), a multi-assignment ARG line, nested
        // JSON-ish values, and substitution from devcontainer build args.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=${FOO}${BAR}
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": ${NESTED_MAP}}
ARG FROM_JSON=${JSON_ARG}

FROM ${IMAGE} AS devcontainer
                "#
                .trim()
                .to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let expanded_dockerfile = devcontainer_manifest
            .expanded_dockerfile_content()
            .await
            .unwrap();

        // Expected output: ${…} usages expanded (ELIXIR_VERSION resolves to
        // the build-arg value "1.21", not the Dockerfile default), while ARG
        // declaration lines themselves and the invalid forward reference are
        // left as written.
        assert_eq!(
            &expanded_dockerfile,
            r#"
ARG INVALID_FORWARD_REFERENCE=${OTP_VERSION}
ARG ELIXIR_VERSION=1.20.0-rc.4
ARG FOO=foo BAR=bar
ARG FOOBAR=foobar
ARG OTP_VERSION=28.4.1
ARG DEBIAN_VERSION=trixie-20260316-slim
ARG IMAGE="docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim"
ARG NESTED_MAP="{"key1": "val1", "key2": "val2"}"
ARG WRAPPING_MAP={"nested_map": {"key1": "val1", "key2": "val2"}}
ARG FROM_JSON=some-value

FROM docker.io/hexpm/elixir:1.21-erlang-28.4.1-debian-trixie-20260316-slim AS devcontainer
                "#
            .trim()
        )
    }
5068
    // TODO: unimplemented placeholder — currently passes vacuously.
    #[test]
    fn test_aliases_dockerfile_with_pre_existing_aliases_for_build() {}
5071
    // TODO: unimplemented placeholder — currently passes vacuously.
    #[test]
    fn test_aliases_dockerfile_with_no_aliases_for_build() {}
5074
    // TODO: unimplemented placeholder — currently passes vacuously.
    #[test]
    fn test_aliases_dockerfile_with_build_target_specified() {}
5077
    /// One `docker exec` invocation captured by [`FakeDocker::run_docker_exec`].
    /// Underscore-prefixed fields are recorded but not currently asserted on.
    pub(crate) struct RecordedExecCommand {
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        /// Environment passed to the exec'd command; asserted on by tests.
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
5085
    /// Test double for [`DockerClient`] that serves canned responses and
    /// records exec invocations instead of talking to a real daemon.
    pub(crate) struct FakeDocker {
        // Mutex because the trait takes `&self` but tests need to mutate.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        // When true, the fake reports itself as podman (no buildkit support).
        podman: bool,
        has_buildx: bool,
    }
5091
5092 impl FakeDocker {
5093 pub(crate) fn new() -> Self {
5094 Self {
5095 podman: false,
5096 has_buildx: true,
5097 exec_commands_recorded: Mutex::new(Vec::new()),
5098 }
5099 }
5100 #[cfg(not(target_os = "windows"))]
5101 fn set_podman(&mut self, podman: bool) {
5102 self.podman = podman;
5103 }
5104 }
5105
5106 #[async_trait]
5107 impl DockerClient for FakeDocker {
5108 async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
5109 if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
5110 return Ok(DockerInspect {
5111 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5112 .to_string(),
5113 config: DockerInspectConfig {
5114 labels: DockerConfigLabels {
5115 metadata: Some(vec![HashMap::from([(
5116 "remoteUser".to_string(),
5117 Value::String("node".to_string()),
5118 )])]),
5119 },
5120 env: Vec::new(),
5121 image_user: Some("root".to_string()),
5122 },
5123 mounts: None,
5124 state: None,
5125 });
5126 }
5127 if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
5128 return Ok(DockerInspect {
5129 id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
5130 .to_string(),
5131 config: DockerInspectConfig {
5132 labels: DockerConfigLabels {
5133 metadata: Some(vec![HashMap::from([(
5134 "remoteUser".to_string(),
5135 Value::String("vscode".to_string()),
5136 )])]),
5137 },
5138 image_user: Some("root".to_string()),
5139 env: Vec::new(),
5140 },
5141 mounts: None,
5142 state: None,
5143 });
5144 }
5145 if id.starts_with("cli_") {
5146 return Ok(DockerInspect {
5147 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
5148 .to_string(),
5149 config: DockerInspectConfig {
5150 labels: DockerConfigLabels {
5151 metadata: Some(vec![HashMap::from([(
5152 "remoteUser".to_string(),
5153 Value::String("node".to_string()),
5154 )])]),
5155 },
5156 image_user: Some("root".to_string()),
5157 env: vec!["PATH=/initial/path".to_string()],
5158 },
5159 mounts: None,
5160 state: None,
5161 });
5162 }
5163 if id == "found_docker_ps" {
5164 return Ok(DockerInspect {
5165 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
5166 .to_string(),
5167 config: DockerInspectConfig {
5168 labels: DockerConfigLabels {
5169 metadata: Some(vec![HashMap::from([(
5170 "remoteUser".to_string(),
5171 Value::String("node".to_string()),
5172 )])]),
5173 },
5174 image_user: Some("root".to_string()),
5175 env: vec!["PATH=/initial/path".to_string()],
5176 },
5177 mounts: Some(vec![DockerInspectMount {
5178 source: "/path/to/local/project".to_string(),
5179 destination: "/workspaces/project".to_string(),
5180 }]),
5181 state: None,
5182 });
5183 }
5184 if id.starts_with("rust_a-") {
5185 return Ok(DockerInspect {
5186 id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
5187 .to_string(),
5188 config: DockerInspectConfig {
5189 labels: DockerConfigLabels {
5190 metadata: Some(vec![HashMap::from([(
5191 "remoteUser".to_string(),
5192 Value::String("vscode".to_string()),
5193 )])]),
5194 },
5195 image_user: Some("root".to_string()),
5196 env: Vec::new(),
5197 },
5198 mounts: None,
5199 state: None,
5200 });
5201 }
5202 if id == "test_image:latest" {
5203 return Ok(DockerInspect {
5204 id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
5205 .to_string(),
5206 config: DockerInspectConfig {
5207 labels: DockerConfigLabels {
5208 metadata: Some(vec![HashMap::from([(
5209 "remoteUser".to_string(),
5210 Value::String("node".to_string()),
5211 )])]),
5212 },
5213 env: Vec::new(),
5214 image_user: Some("root".to_string()),
5215 },
5216 mounts: None,
5217 state: None,
5218 });
5219 }
5220
5221 Err(DevContainerError::DockerNotAvailable)
5222 }
5223 async fn get_docker_compose_config(
5224 &self,
5225 config_files: &Vec<PathBuf>,
5226 ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
5227 let project_path = PathBuf::from(TEST_PROJECT_PATH);
5228 if config_files.len() == 1
5229 && config_files.get(0)
5230 == Some(
5231 &project_path
5232 .join(".devcontainer")
5233 .join("docker-compose.yml"),
5234 )
5235 {
5236 return Ok(Some(DockerComposeConfig {
5237 name: None,
5238 services: HashMap::from([
5239 (
5240 "app".to_string(),
5241 DockerComposeService {
5242 build: Some(DockerComposeServiceBuild {
5243 context: Some(".".to_string()),
5244 dockerfile: Some("Dockerfile".to_string()),
5245 args: None,
5246 additional_contexts: None,
5247 target: None,
5248 }),
5249 volumes: vec![MountDefinition {
5250 source: Some("../..".to_string()),
5251 target: "/workspaces".to_string(),
5252 mount_type: Some("bind".to_string()),
5253 }],
5254 network_mode: Some("service:db".to_string()),
5255 ..Default::default()
5256 },
5257 ),
5258 (
5259 "db".to_string(),
5260 DockerComposeService {
5261 image: Some("postgres:14.1".to_string()),
5262 volumes: vec![MountDefinition {
5263 source: Some("postgres-data".to_string()),
5264 target: "/var/lib/postgresql/data".to_string(),
5265 mount_type: Some("volume".to_string()),
5266 }],
5267 env_file: Some(vec![".env".to_string()]),
5268 ..Default::default()
5269 },
5270 ),
5271 ]),
5272 volumes: HashMap::from([(
5273 "postgres-data".to_string(),
5274 DockerComposeVolume::default(),
5275 )]),
5276 }));
5277 }
5278 if config_files.len() == 1
5279 && config_files.get(0)
5280 == Some(
5281 &project_path
5282 .join(".devcontainer")
5283 .join("docker-compose-context-parent.yml"),
5284 )
5285 {
5286 return Ok(Some(DockerComposeConfig {
5287 name: None,
5288 services: HashMap::from([(
5289 "app".to_string(),
5290 DockerComposeService {
5291 build: Some(DockerComposeServiceBuild {
5292 context: Some("..".to_string()),
5293 dockerfile: Some(
5294 PathBuf::from(".devcontainer")
5295 .join("Dockerfile")
5296 .display()
5297 .to_string(),
5298 ),
5299 args: None,
5300 additional_contexts: None,
5301 target: None,
5302 }),
5303 ..Default::default()
5304 },
5305 )]),
5306 volumes: HashMap::new(),
5307 }));
5308 }
5309 if config_files.len() == 1
5310 && config_files.get(0)
5311 == Some(
5312 &project_path
5313 .join(".devcontainer")
5314 .join("docker-compose-plain.yml"),
5315 )
5316 {
5317 return Ok(Some(DockerComposeConfig {
5318 name: None,
5319 services: HashMap::from([(
5320 "app".to_string(),
5321 DockerComposeService {
5322 image: Some("test_image:latest".to_string()),
5323 command: vec!["sleep".to_string(), "infinity".to_string()],
5324 ..Default::default()
5325 },
5326 )]),
5327 ..Default::default()
5328 }));
5329 }
5330 Err(DevContainerError::DockerNotAvailable)
5331 }
5332 async fn docker_compose_build(
5333 &self,
5334 _config_files: &Vec<PathBuf>,
5335 _project_name: &str,
5336 ) -> Result<(), DevContainerError> {
5337 Ok(())
5338 }
5339 async fn run_docker_exec(
5340 &self,
5341 container_id: &str,
5342 remote_folder: &str,
5343 user: &str,
5344 env: &HashMap<String, String>,
5345 inner_command: Command,
5346 ) -> Result<(), DevContainerError> {
5347 let mut record = self
5348 .exec_commands_recorded
5349 .lock()
5350 .expect("should be available");
5351 record.push(RecordedExecCommand {
5352 _container_id: container_id.to_string(),
5353 _remote_folder: remote_folder.to_string(),
5354 _user: user.to_string(),
5355 env: env.clone(),
5356 _inner_command: inner_command,
5357 });
5358 Ok(())
5359 }
5360 async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
5361 Err(DevContainerError::DockerNotAvailable)
5362 }
5363 async fn find_process_by_filters(
5364 &self,
5365 _filters: Vec<String>,
5366 ) -> Result<Option<DockerPs>, DevContainerError> {
5367 Ok(Some(DockerPs {
5368 id: "found_docker_ps".to_string(),
5369 }))
5370 }
5371 fn supports_compose_buildkit(&self) -> bool {
5372 !self.podman && self.has_buildx
5373 }
5374 fn docker_cli(&self) -> String {
5375 if self.podman {
5376 "podman".to_string()
5377 } else {
5378 "docker".to_string()
5379 }
5380 }
5381 }
5382
    /// A command captured by [`TestCommandRunner`]: program plus stringified
    /// arguments, suitable for assertions.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
5388
    /// [`CommandRunner`] test double that records commands instead of
    /// spawning processes. Mutex because the trait takes `&self`.
    pub(crate) struct TestCommandRunner {
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
5392
5393 impl TestCommandRunner {
5394 fn new() -> Self {
5395 Self {
5396 commands_recorded: Mutex::new(Vec::new()),
5397 }
5398 }
5399
5400 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
5401 let record = self.commands_recorded.lock().expect("poisoned");
5402 record
5403 .iter()
5404 .filter(|r| r.program == program)
5405 .map(|r| r.clone())
5406 .collect()
5407 }
5408 }
5409
5410 #[async_trait]
5411 impl CommandRunner for TestCommandRunner {
5412 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
5413 let mut record = self.commands_recorded.lock().expect("poisoned");
5414
5415 record.push(TestCommand {
5416 program: command.get_program().display().to_string(),
5417 args: command
5418 .get_args()
5419 .map(|a| a.display().to_string())
5420 .collect(),
5421 });
5422
5423 Ok(Output {
5424 status: ExitStatus::default(),
5425 stdout: vec![],
5426 stderr: vec![],
5427 })
5428 }
5429 }
5430
5431 fn fake_http_client() -> Arc<dyn HttpClient> {
5432 FakeHttpClient::create(|request| async move {
5433 let (parts, _body) = request.into_parts();
5434 if parts.uri.path() == "/token" {
5435 let token_response = TokenResponse {
5436 token: "token".to_string(),
5437 };
5438 return Ok(http::Response::builder()
5439 .status(200)
5440 .body(http_client::AsyncBody::from(
5441 serde_json_lenient::to_string(&token_response).unwrap(),
5442 ))
5443 .unwrap());
5444 }
5445
5446 // OCI specific things
5447 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
5448 let response = r#"
5449 {
5450 "schemaVersion": 2,
5451 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5452 "config": {
5453 "mediaType": "application/vnd.devcontainers",
5454 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5455 "size": 2
5456 },
5457 "layers": [
5458 {
5459 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5460 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
5461 "size": 59392,
5462 "annotations": {
5463 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
5464 }
5465 }
5466 ],
5467 "annotations": {
5468 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5469 "com.github.package.type": "devcontainer_feature"
5470 }
5471 }
5472 "#;
5473 return Ok(http::Response::builder()
5474 .status(200)
5475 .body(http_client::AsyncBody::from(response))
5476 .unwrap());
5477 }
5478
5479 if parts.uri.path()
5480 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
5481 {
5482 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5487 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5488 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5489 ```
5490 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5491 ```
5492 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5493
5494
5495 ## OS Support
5496
5497 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5498
5499 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
5500
5501 `bash` is required to execute the `install.sh` script."#),
5502 ("./README.md", r#"
5503 # Docker (Docker-in-Docker) (docker-in-docker)
5504
5505 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
5506
5507 ## Example Usage
5508
5509 ```json
5510 "features": {
5511 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
5512 }
5513 ```
5514
5515 ## Options
5516
5517 | Options Id | Description | Type | Default Value |
5518 |-----|-----|-----|-----|
5519 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
5520 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
5521 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
5522 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
5523 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
5524 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
5525 | installDockerBuildx | Install Docker Buildx | boolean | true |
5526 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
5527 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
5528
5529 ## Customizations
5530
5531 ### VS Code Extensions
5532
5533 - `ms-azuretools.vscode-containers`
5534
5535 ## Limitations
5536
5537 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
5538 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
5539 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
5540 ```
5541 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
5542 ```
5543 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
5544
5545
5546 ## OS Support
5547
5548 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
5549
5550 `bash` is required to execute the `install.sh` script.
5551
5552
5553 ---
5554
5555 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
5556 ("./devcontainer-feature.json", r#"
5557 {
5558 "id": "docker-in-docker",
5559 "version": "2.16.1",
5560 "name": "Docker (Docker-in-Docker)",
5561 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
5562 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
5563 "options": {
5564 "version": {
5565 "type": "string",
5566 "proposals": [
5567 "latest",
5568 "none",
5569 "20.10"
5570 ],
5571 "default": "latest",
5572 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
5573 },
5574 "moby": {
5575 "type": "boolean",
5576 "default": true,
5577 "description": "Install OSS Moby build instead of Docker CE"
5578 },
5579 "mobyBuildxVersion": {
5580 "type": "string",
5581 "default": "latest",
5582 "description": "Install a specific version of moby-buildx when using Moby"
5583 },
5584 "dockerDashComposeVersion": {
5585 "type": "string",
5586 "enum": [
5587 "none",
5588 "v1",
5589 "v2"
5590 ],
5591 "default": "v2",
5592 "description": "Default version of Docker Compose (v1, v2 or none)"
5593 },
5594 "azureDnsAutoDetection": {
5595 "type": "boolean",
5596 "default": true,
5597 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
5598 },
5599 "dockerDefaultAddressPool": {
5600 "type": "string",
5601 "default": "",
5602 "proposals": [],
5603 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
5604 },
5605 "installDockerBuildx": {
5606 "type": "boolean",
5607 "default": true,
5608 "description": "Install Docker Buildx"
5609 },
5610 "installDockerComposeSwitch": {
5611 "type": "boolean",
5612 "default": false,
5613 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
5614 },
5615 "disableIp6tables": {
5616 "type": "boolean",
5617 "default": false,
5618 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
5619 }
5620 },
5621 "entrypoint": "/usr/local/share/docker-init.sh",
5622 "privileged": true,
5623 "containerEnv": {
5624 "DOCKER_BUILDKIT": "1"
5625 },
5626 "customizations": {
5627 "vscode": {
5628 "extensions": [
5629 "ms-azuretools.vscode-containers"
5630 ],
5631 "settings": {
5632 "github.copilot.chat.codeGeneration.instructions": [
5633 {
5634 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
5635 }
5636 ]
5637 }
5638 }
5639 },
5640 "mounts": [
5641 {
5642 "source": "dind-var-lib-docker-${devcontainerId}",
5643 "target": "/var/lib/docker",
5644 "type": "volume"
5645 }
5646 ],
5647 "installsAfter": [
5648 "ghcr.io/devcontainers/features/common-utils"
5649 ]
5650 }"#),
5651 ("./install.sh", r#"
5652 #!/usr/bin/env bash
5653 #-------------------------------------------------------------------------------------------------------------
5654 # Copyright (c) Microsoft Corporation. All rights reserved.
5655 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5656 #-------------------------------------------------------------------------------------------------------------
5657 #
5658 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
5659 # Maintainer: The Dev Container spec maintainers
5660
5661
5662 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
5663 USE_MOBY="${MOBY:-"true"}"
5664 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
5665 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
5666 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
5667 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
5668 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5669 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
5670 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
5671 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
5672 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
5673 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
5674 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
5675 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
5676
5677 # Default: Exit on any failure.
5678 set -e
5679
5680 # Clean up
5681 rm -rf /var/lib/apt/lists/*
5682
5683 # Setup STDERR.
5684 err() {
5685 echo "(!) $*" >&2
5686 }
5687
5688 if [ "$(id -u)" -ne 0 ]; then
5689 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5690 exit 1
5691 fi
5692
5693 ###################
5694 # Helper Functions
5695 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
5696 ###################
5697
5698 # Determine the appropriate non-root user
5699 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
5700 USERNAME=""
5701 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
5702 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
5703 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
5704 USERNAME=${CURRENT_USER}
5705 break
5706 fi
5707 done
5708 if [ "${USERNAME}" = "" ]; then
5709 USERNAME=root
5710 fi
5711 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
5712 USERNAME=root
5713 fi
5714
# Refresh the package index, but only when the local metadata cache is empty,
# so repeated invocations skip the redundant network round-trip.
pkg_mgr_update() {
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
            echo "Running apt-get update..."
            apt-get update -y
        fi
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then
        # microdnf shares yum's cache directory; the other managers use their own.
        if [ ${PKG_MGR_CMD} = "microdnf" ]; then
            cache_check_dir="/var/cache/yum"
        else
            cache_check_dir="/var/cache/${PKG_MGR_CMD}"
        fi
        if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
            echo "Running ${PKG_MGR_CMD} makecache ..."
            ${PKG_MGR_CMD} makecache
        fi
    fi
}
5737
# Install the named packages, but only when at least one of them is missing.
check_packages() {
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        # dpkg -s fails if any listed package is not installed.
        if ! dpkg -s "$@" > /dev/null 2>&1; then
            pkg_mgr_update
            apt-get -y install --no-install-recommends "$@"
        fi
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then
        # rpm -q fails if any listed package is not installed.
        if ! rpm -q "$@" > /dev/null 2>&1; then
            pkg_mgr_update
            ${PKG_MGR_CMD} -y install "$@"
        fi
    fi
}
5755
# Figure out correct version if a three part version number is not passed.
#
# Usage: find_version_from_git_tags <variable_name> <repository-url> [<prefix>] [<separator>] [<last-part-optional>]
# Reads the requested version out of the variable named <variable_name>,
# resolves it against the repository's git tags, and writes the resolved
# version back into that same variable (declare -g). Exits the whole script
# (exit 1) when no acceptable match is found.
find_version_from_git_tags() {
    local variable_name=$1
    local requested_version=${!variable_name}    # indirect read of the caller's variable
    if [ "${requested_version}" = "none" ]; then return; fi
    local repository=$2
    local prefix=${3:-"tags/v"}
    local separator=${4:-"."}
    local last_part_optional=${5:-"false"}
    # Intent: resolve unless a full three-part version was given. NOTE(review):
    # the unescaped "." in `grep -o "."` matches ANY character, so this actually
    # tests string length != 2; everything except a bare two-character request
    # (e.g. "20") takes the resolution path — confirm that is acceptable.
    if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
        local escaped_separator=${separator//./\\.}
        local last_part
        if [ "${last_part_optional}" = "true" ]; then
            last_part="(${escaped_separator}[0-9]+)?"
        else
            last_part="${escaped_separator}[0-9]+"
        fi
        local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
        # Newest-first list of versions advertised by the remote repository.
        local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
        if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
            declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
        else
            # set +e: the grep legitimately finds nothing for bad requests;
            # the validation below reports that case.
            set +e
            declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
            set -e
        fi
    fi
    # Validate the resolved value against the tag list (empty list fails here).
    if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
        # NOTE(review): err() prints with plain echo, so the "\n" sequences are
        # emitted literally; the trailing >&2 is redundant with err's own redirect.
        err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
        exit 1
    fi
    echo "${variable_name}=${!variable_name}"
}
5789
# Use semver logic to decrement a version number then look for the closest match.
#
# Usage: find_prev_version_from_git_tags <variable_name> <repository-url> [<prefix>] [<separator>] [<last-part-optional>] [<suffix-regex>]
# Decrements the version held in <variable_name> by one break-fix (or minor,
# or major) step, then resolves the result against the repository's git tags
# via find_version_from_git_tags. Result is written back into <variable_name>.
find_prev_version_from_git_tags() {
    local variable_name=$1
    local current_version=${!variable_name}
    local repository=$2
    # Normally a "v" is used before the version number, but support alternate cases
    local prefix=${3:-"tags/v"}
    # Some repositories use "_" instead of "." for version number part separation, support that
    local separator=${4:-"."}
    # Some tools release versions that omit the last digit (e.g. go)
    local last_part_optional=${5:-"false"}
    # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
    # NOTE(review): this parameter is currently unused in the function body.
    local version_suffix_regex=$6
    # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
    set +e
    # Split current_version into major / minor / breakfix parts (empty when absent).
    major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
    minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
    breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"

    if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
        # X.0.0 -> step back one major release.
        ((major=major-1))
        declare -g ${variable_name}="${major}"
        # Look for latest version from previous major release
        find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
    # Handle situations like Go's odd version pattern where "0" releases omit the last part
    elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
        # X.Y.0 (or X.Y) -> step back one minor release.
        ((minor=minor-1))
        declare -g ${variable_name}="${major}.${minor}"
        # Look for latest version from previous minor release
        find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
    else
        # X.Y.Z -> step back one break-fix release.
        ((breakfix=breakfix-1))
        if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
            declare -g ${variable_name}="${major}.${minor}"
        else
            declare -g ${variable_name}="${major}.${minor}.${breakfix}"
        fi
    fi
    set -e
}
5830
# Function to fetch the version released prior to the latest version.
#
# Usage: get_previous_version <project-url> <releases-api-url> <variable_name>
# Queries the GitHub releases API; when rate-limited, falls back to resolving
# via git tags. The result is written back into <variable_name> (declare -g).
get_previous_version() {
    local url=$1
    local repo_url=$2
    local variable_name=$3
    # Deliberately not local: find_prev_version_from_git_tags resolves
    # "prev_version" by name via declare -g.
    prev_version=${!variable_name}

    output=$(curl -s "$repo_url");
    # A JSON object response is an API error payload; an array is the release list.
    if echo "$output" | jq -e 'type == "object"' > /dev/null; then
        message=$(echo "$output" | jq -r '.message')

        if [[ $message == "API rate limit exceeded"* ]]; then
            echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
            echo -e "\nAttempting to find latest version using GitHub tags."
            find_prev_version_from_git_tags prev_version "$url" "tags/v"
            declare -g ${variable_name}="${prev_version}"
        fi
    elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
        echo -e "\nAttempting to find latest version using GitHub Api."
        # Entry [1] is the release immediately before the latest; strip a leading "v".
        version=$(echo "$output" | jq -r '.[1].tag_name')
        declare -g ${variable_name}="${version#v}"
    fi
    echo "${variable_name}=${!variable_name}"
}
5855
# Convert a github.com project URL into its REST API "releases" endpoint.
# e.g. https://github.com/docker/compose -> https://api.github.com/repos/docker/compose/releases
get_github_api_repo_url() {
    local project_url="$1"
    local api_base="${project_url/https:\/\/github.com/https:\/\/api.github.com\/repos}"
    echo "${api_base}/releases"
}
5860
5861 ###########################################
5862 # Start docker-in-docker installation
5863 ###########################################
5864
# Ensure apt is in non-interactive to avoid prompts
export DEBIAN_FRONTEND=noninteractive

# Source /etc/os-release to get OS info (ID, ID_LIKE, VERSION_ID, VERSION_CODENAME)
. /etc/os-release

# Determine adjusted ID and package manager. ADJUSTED_ID collapses all
# supported distros into "debian" or "rhel" families for the rest of the script.
if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
    ADJUSTED_ID="debian"
    PKG_MGR_CMD="apt-get"
    # Use dpkg for Debian-based systems
    architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
    ADJUSTED_ID="rhel"
    # Determine the appropriate package manager for RHEL-based systems
    # (first available wins, in this order of preference)
    for pkg_mgr in tdnf dnf microdnf yum; do
        if command -v "$pkg_mgr" >/dev/null 2>&1; then
            PKG_MGR_CMD="$pkg_mgr"
            break
        fi
    done

    if [ -z "${PKG_MGR_CMD}" ]; then
        err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
        exit 1
    fi

    architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
else
    err "Linux distro ${ID} not supported."
    exit 1
fi

# Azure Linux specific setup: it ships no VERSION_CODENAME, so synthesize one
# from the version id for the codename checks below.
if [ "${ID}" = "azurelinux" ]; then
    VERSION_CODENAME="azurelinux${VERSION_ID}"
fi
5902
# Prevent attempting to install Moby on Debian trixie (packages removed)
if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
    err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
    err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
    exit 1
fi

# Check if distro is supported for the selected engine (Moby vs Docker CE).
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        # Moby is only published for a fixed set of Debian/Ubuntu codenames.
        if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
            err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
            err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
            exit 1
        fi
        echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then
        if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
            echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
        else
            echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
        fi
    fi
else
    if [ "${ADJUSTED_ID}" = "debian" ]; then
        # Docker CE is only published for a fixed set of Debian/Ubuntu codenames.
        if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
            err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
            err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
            exit 1
        fi
        echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
    elif [ "${ADJUSTED_ID}" = "rhel" ]; then

        echo "RHEL-based system (${ID}) detected - using Docker CE packages"
    fi
fi
5939
# Install base dependencies needed by the rest of the script
# (downloads, key handling, compression, firewalling, JSON parsing).
base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
case ${ADJUSTED_ID} in
    debian)
        check_packages apt-transport-https $base_packages dirmngr
        ;;
    rhel)
        check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel

        ;;
esac

# Install git if not already present (needed for git ls-remote version lookups)
if ! command -v git >/dev/null 2>&1; then
    check_packages git
fi

# Update CA certificates to ensure HTTPS connections work properly
# This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
# Only run for Debian-based systems (RHEL uses update-ca-trust instead)
if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
    update-ca-certificates
fi

# Swap to legacy iptables for compatibility (Debian only)
if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
    update-alternatives --set iptables /usr/sbin/iptables-legacy
    update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
fi
5969
# Set up the necessary repositories for the selected engine:
# Moby -> Microsoft package feeds; Docker CE -> download.docker.com.
# Also records the engine/cli package names used by the install step later.
if [ "${USE_MOBY}" = "true" ]; then
    # Name of open source engine/cli
    engine_package_name="moby-engine"
    cli_package_name="moby-cli"

    case ${ADJUSTED_ID} in
        debian)
            # Import key safely and import Microsoft apt repo
            # (both the stable and rolling keys are concatenated into one keyring).
            {
                curl -sSL ${MICROSOFT_GPG_KEYS_URI}
                curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
            } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
            echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
            ;;
        rhel)
            echo "(*) ${ID} detected - checking for Moby packages..."

            # Check if moby packages are available in default repos
            if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                echo "(*) Using built-in ${ID} Moby packages"
            else
                case "${ID}" in
                    azurelinux)
                        # No Moby feed for Azure Linux — fail with guidance.
                        echo "(*) Moby packages not found in Azure Linux repositories"
                        echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
                        err "Moby packages are not available for Azure Linux ${VERSION_ID}."
                        err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                        exit 1
                        ;;
                    mariner)
                        echo "(*) Adding Microsoft repository for CBL-Mariner..."
                        # Add Microsoft repository if packages aren't available locally
                        curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
                        cat > /etc/yum.repos.d/microsoft.repo << EOF
[microsoft]
name=Microsoft Repository
baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
EOF
                        # Verify packages are available after adding repo
                        pkg_mgr_update
                        if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
                            echo "(*) Moby packages not found in Microsoft repository either"
                            err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
                            err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
                            exit 1
                        fi
                        ;;
                    *)
                        err "Moby packages are not available for ${ID}. Please use 'moby': false option."
                        exit 1
                        ;;
                esac
            fi
            ;;
    esac
else
    # Name of licensed engine/cli
    engine_package_name="docker-ce"
    cli_package_name="docker-ce-cli"
    case ${ADJUSTED_ID} in
        debian)
            # Import Docker's signing key and add the download.docker.com apt repo.
            curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
            echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
            ;;
        rhel)
            # Docker CE repository setup for RHEL-based systems.
            # Writes the centos/9 stable repo definition manually.
            setup_docker_ce_repo() {
                curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
                cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable
baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
skip_if_unavailable=1
module_hotfixes=1
EOF
            }
            # Best-effort install of extra runtime deps on Azure Linux / Mariner.
            install_azure_linux_deps() {
                echo "(*) Installing device-mapper libraries for Docker CE..."
                # NOTE(review): the `A && B || C` chain prints the fallback message
                # when the install fails OR when ID is "mariner" — appears intentional
                # as a best-effort path, but verify.
                [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
                echo "(*) Installing additional Docker CE dependencies..."
                ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
                    echo "(*) Some optional dependencies could not be installed, continuing..."
                }
            }
            # Minimal SELinux file-context entry for /var/lib/docker when
            # SELinux is enabled but container-selinux is unavailable.
            setup_selinux_context() {
                if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
                    echo "(*) Creating minimal SELinux context for Docker compatibility..."
                    mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
                    echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
                fi
            }

            # Special handling for RHEL Docker CE installation
            case "${ID}" in
                azurelinux|mariner)
                    echo "(*) ${ID} detected"
                    echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
                    echo "(*) Setting up Docker CE repository..."

                    setup_docker_ce_repo
                    install_azure_linux_deps

                    if [ "${USE_MOBY}" != "true" ]; then
                        echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
                        echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
                        setup_selinux_context
                    else
                        echo "(*) Using Moby - container-selinux not required"
                    fi
                    ;;
                *)
                    # Standard RHEL/CentOS/Fedora approach
                    if command -v dnf >/dev/null 2>&1; then
                        dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    elif command -v yum-config-manager >/dev/null 2>&1; then
                        yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
                    else
                        # Manual fallback
                        setup_docker_ce_repo
                    fi
                    ;;
            esac
            ;;
    esac
fi
6102
# Refresh the package database so the newly added repositories are visible.
if [ "${ADJUSTED_ID}" = "debian" ]; then
    apt-get update
elif [ "${ADJUSTED_ID}" = "rhel" ]; then
    pkg_mgr_update
fi
6112
# Soft version matching: translate DOCKER_VERSION into package-manager
# version-pin suffixes (engine_version_suffix / cli_version_suffix).
# Empty suffixes mean "install whatever is latest in the repo".
if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
    # Empty, meaning grab whatever "latest" is in apt repo
    engine_version_suffix=""
    cli_version_suffix=""
else
    case ${ADJUSTED_ID} in
        debian)
            # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
            docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
            docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
            # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
            docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
            set +e # Don't exit if finding version fails - will handle gracefully
            # apt-style pin: "=<version>"; a bare "=" below means nothing matched.
            cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
            set -e
            if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
                err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                exit 1
            fi
            ;;
        rhel)
            # For RHEL-based systems, use dnf/yum to find versions
            docker_version_escaped="${DOCKER_VERSION//./\\.}"
            set +e # Don't exit if finding version fails - will handle gracefully
            if [ "${USE_MOBY}" = "true" ]; then
                available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            else
                available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
            fi
            set -e
            if [ -n "${available_versions}" ]; then
                # rpm-style pin: "-<version>" appended to the package name.
                engine_version_suffix="-${available_versions}"
                cli_version_suffix="-${available_versions}"
            else
                # Unlike the debian path, a miss here is non-fatal.
                echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
                engine_version_suffix=""
                cli_version_suffix=""
            fi
            ;;
    esac
fi
6157
# Version matching for moby-buildx: resolve MOBY_BUILDX_VERSION into
# buildx_version_suffix (empty = latest). Only relevant on the Moby path.
if [ "${USE_MOBY}" = "true" ]; then
    if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
        # Empty, meaning grab whatever "latest" is in apt repo
        buildx_version_suffix=""
    else
        case ${ADJUSTED_ID} in
            debian)
                # Same escaping/regex approach as the Docker version match above.
                buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
                buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
                set +e
                buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
                set -e
                if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
                    err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
                    apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
                    exit 1
                fi
                ;;
            rhel)
                # For RHEL-based systems, try to find buildx version or use latest
                buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
                set +e
                available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
                set -e
                if [ -n "${available_buildx}" ]; then
                    buildx_version_suffix="-${available_buildx}"
                else
                    echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
                    buildx_version_suffix=""
                fi
                ;;
        esac
        echo "buildx_version_suffix ${buildx_version_suffix}"
    fi
fi
6195
# Install Docker / Moby CLI and Engine if not already installed.
# Fixes over the previous revision:
#   - the RHEL Docker CE paths pinned docker-ce with the CLI suffix and
#     docker-ce-cli with the engine suffix (swapped); each package now uses
#     its own suffix, matching the Debian path (the suffixes are currently
#     assigned the same value on RHEL, so behavior is unchanged).
#   - removed a duplicated "Standard installation failed" message.
if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
    echo "Docker / Moby CLI and Engine already installed."
else
    case ${ADJUSTED_ID} in
        debian)
            if [ "${USE_MOBY}" = "true" ]; then
                # Install engine
                set +e # Handle error gracefully
                apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
                    exit 1
                fi

                # Install compose (best-effort; not every feed publishes moby-compose)
                apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            else
                apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
                # Install compose; hold engine/cli so unattended upgrades don't move them
                apt-mark hold docker-ce docker-ce-cli
                apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
            fi
            ;;
        rhel)
            if [ "${USE_MOBY}" = "true" ]; then
                set +e # Handle error gracefully
                ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
                exit_code=$?
                set -e

                if [ ${exit_code} -ne 0 ]; then
                    err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
                    exit 1
                fi

                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            else
                # Special handling for Azure Linux Docker CE installation
                if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
                    echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."

                    # Try the package manager first; fall back to a manual download on failure.
                    set +e # Don't exit on error for this section
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                    install_result=$?
                    set -e

                    if [ $install_result -ne 0 ]; then
                        echo "(*) Standard installation failed, trying manual installation..."

                        # Create directory for downloading packages
                        mkdir -p /tmp/docker-ce-install

                        # Download packages manually using curl since tdnf doesn't support download
                        echo "(*) Downloading Docker CE packages manually..."

                        # Get the repository baseurl
                        # NOTE(review): hard-coded to x86_64 — this manual fallback does
                        # not currently support other architectures; verify before use on arm64.
                        repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"

                        # Download packages directly
                        cd /tmp/docker-ce-install

                        # Get package names with versions (strip the leading "-" of the rpm pin)
                        if [ -n "${engine_version_suffix}" ]; then
                            docker_ce_version="${engine_version_suffix#-}"
                            docker_cli_version="${cli_version_suffix#-}"
                        else
                            # Get latest version from repository
                            docker_ce_version="latest"
                        fi

                        echo "(*) Attempting to download Docker CE packages from repository..."

                        # Try to download latest packages if specific version fails
                        if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
                            # Fallback: try to get latest available version
                            echo "(*) Specific version not found, trying latest..."
                            latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
                            latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)

                            if [ -n "${latest_docker}" ]; then
                                curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
                                curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
                            else
                                echo "(*) ERROR: Could not find Docker CE packages in repository"
                                echo "(*) Please check repository configuration or use 'moby': true"
                                exit 1
                            fi
                        fi
                        # Install systemd libraries required by Docker CE
                        echo "(*) Installing systemd libraries required by Docker CE..."
                        ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
                            echo "(*) WARNING: Could not install systemd libraries"
                            echo "(*) Docker may fail to start without these"
                        }

                        # Install with rpm --force --nodeps
                        echo "(*) Installing Docker CE packages with dependency override..."
                        rpm -Uvh --force --nodeps *.rpm

                        # Cleanup
                        cd /
                        rm -rf /tmp/docker-ce-install

                        echo "(*) Docker CE installation completed with dependency bypass"
                        echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
                    fi
                else
                    # Standard installation for other RHEL-based systems
                    ${PKG_MGR_CMD} -y install docker-ce${engine_version_suffix} docker-ce-cli${cli_version_suffix} containerd.io
                fi
                # Install compose
                if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
                    ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
                fi
            fi
            ;;
    esac
fi
6326
echo "Finished installing docker / moby!"

# Directory where compose is later copied so the Docker CLI can discover it
# as a plugin (see the docker-compose install step below).
docker_home="/usr/libexec/docker"
cli_plugins_dir="${docker_home}/cli-plugins"
6331
# Fallback for docker-compose: when the requested release asset cannot be
# fetched, resolve the previous release and download that instead.
# Reads/writes globals: compose_version, target_compose_arch, docker_compose_path.
fallback_compose(){
    local project_url="$1"
    local releases_api_url
    releases_api_url="$(get_github_api_repo_url "$project_url")"
    echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
    get_previous_version "${project_url}" "${releases_api_url}" compose_version
    echo -e "\nAttempting to install v${compose_version}"
    curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
}
6341
# If 'docker-compose' command is to be included.
# Fix over the previous revision: the curl failure handler echoed the exact
# "Failed to fetch" message that fallback_compose itself prints as its first
# step, producing the message twice; the handler now just calls the fallback
# (matching the compose-switch install pattern).
if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
    # Map the package architecture onto compose's release-asset naming.
    case "${architecture}" in
        amd64|x86_64) target_compose_arch=x86_64 ;;
        arm64|aarch64) target_compose_arch=aarch64 ;;
        *)
            echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
            exit 1
    esac

    docker_compose_path="/usr/local/bin/docker-compose"
    if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
        err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
        INSTALL_DOCKER_COMPOSE_SWITCH="false"

        if [ "${target_compose_arch}" = "x86_64" ]; then
            echo "(*) Installing docker compose v1..."
            curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
            chmod +x ${docker_compose_path}

            # Download the SHA256 checksum and verify the binary against it
            DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
            echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
            sha256sum -c docker-compose.sha256sum --ignore-missing
        elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
            err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
            exit 1
        else
            # Use pip to get a version that runs on this architecture
            check_packages python3-minimal python3-pip libffi-dev python3-venv
            echo "(*) Installing docker compose v1 via pip..."
            export PYTHONUSERBASE=/usr/local
            pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
        fi
    else
        # v2 path: resolve the requested version against git tags, then download.
        compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
        docker_compose_url="https://github.com/docker/compose"
        find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
        echo "(*) Installing docker-compose ${compose_version}..."
        # fallback_compose announces the failure itself, so no extra echo here.
        curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || fallback_compose "$docker_compose_url"

        chmod +x ${docker_compose_path}

        # Download the SHA256 checksum and verify the binary against it
        DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
        echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
        sha256sum -c docker-compose.sha256sum --ignore-missing

        # Also expose compose as a Docker CLI plugin ("docker compose")
        mkdir -p ${cli_plugins_dir}
        cp ${docker_compose_path} ${cli_plugins_dir}
    fi
fi
6397
# fallback method for compose-switch: resolve the release prior to
# ${compose_switch_version} and download it to /usr/local/bin/compose-switch.
# Relies on globals set by the caller: compose_switch_version and
# target_switch_arch — TODO confirm target_switch_arch is assigned before
# the first call, otherwise the download URL ends in a bare "linux-".
fallback_compose-switch() {
    local url=$1
    local repo_url=$(get_github_api_repo_url "$url")
    echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
    get_previous_version "$url" "$repo_url" compose_switch_version
    echo -e "\nAttempting to install v${compose_switch_version}"
    curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
}
# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
# Fix over the previous revision: target_switch_arch was only mapped AFTER the
# first possible fallback_compose-switch call, so that fallback downloaded a
# URL with an empty architecture suffix. The mapping now happens first.
if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
    if type docker-compose > /dev/null 2>&1; then
        echo "(*) Installing compose-switch..."
        current_compose_path="$(command -v docker-compose)"
        target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
        compose_switch_version="latest"
        compose_switch_url="https://github.com/docker/compose-switch"
        # Map architecture for compose-switch downloads BEFORE any fallback
        # runs, since fallback_compose-switch builds its URL from this value.
        case "${architecture}" in
            amd64|x86_64) target_switch_arch=amd64 ;;
            arm64|aarch64) target_switch_arch=arm64 ;;
            *) target_switch_arch=${architecture} ;;
        esac
        # Try to get latest version, fallback to known stable version if GitHub API fails
        # NOTE(review): find_version_from_git_tags calls `exit 1` on failure,
        # which terminates the script even under `set +e`; the fallback branch
        # below only triggers for an empty/unresolved result. Verify intent.
        set +e
        find_version_from_git_tags compose_switch_version "$compose_switch_url"
        if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
            echo "(*) GitHub API rate limited or failed, using fallback method"
            fallback_compose-switch "$compose_switch_url"
        fi
        set -e

        curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
        chmod +x /usr/local/bin/compose-switch
        # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
        # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
        mv "${current_compose_path}" "${target_compose_path}"
        update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
        update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
    else
        err "Skipping installation of compose-switch as docker compose is unavailable..."
    fi
fi
6441
6442 # If init file already exists, exit
6443 if [ -f "/usr/local/share/docker-init.sh" ]; then
6444 echo "/usr/local/share/docker-init.sh already exists, so exiting."
6445 # Clean up
6446 rm -rf /var/lib/apt/lists/*
6447 exit 0
6448 fi
6449 echo "docker-init doesn't exist, adding..."
6450
6451 if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
6452 groupadd -r docker
6453 fi
6454
6455 usermod -aG docker ${USERNAME}
6456
6457 # fallback for docker/buildx
6458 fallback_buildx() {
6459 local url=$1
6460 local repo_url=$(get_github_api_repo_url "$url")
6461 echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
6462 get_previous_version "$url" "$repo_url" buildx_version
6463 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
6464 echo -e "\nAttempting to install v${buildx_version}"
6465 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
6466 }
6467
6468 if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
6469 buildx_version="latest"
6470 docker_buildx_url="https://github.com/docker/buildx"
6471 find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
6472 echo "(*) Installing buildx ${buildx_version}..."
6473
6474 # Map architecture for buildx downloads
6475 case "${architecture}" in
6476 amd64|x86_64) target_buildx_arch=amd64 ;;
6477 arm64|aarch64) target_buildx_arch=arm64 ;;
6478 *) target_buildx_arch=${architecture} ;;
6479 esac
6480
6481 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
6482
6483 cd /tmp
6484 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
6485
6486 docker_home="/usr/libexec/docker"
6487 cli_plugins_dir="${docker_home}/cli-plugins"
6488
6489 mkdir -p ${cli_plugins_dir}
6490 mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
6491 chmod +x ${cli_plugins_dir}/docker-buildx
6492
6493 chown -R "${USERNAME}:docker" "${docker_home}"
6494 chmod -R g+r+w "${docker_home}"
6495 find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
6496 fi
6497
6498 DOCKER_DEFAULT_IP6_TABLES=""
6499 if [ "$DISABLE_IP6_TABLES" == true ]; then
6500 requested_version=""
6501 # checking whether the version requested either is in semver format or just a number denoting the major version
6502 # and, extracting the major version number out of the two scenarios
6503 semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
6504 if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
6505 requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
6506 elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
6507 requested_version=$DOCKER_VERSION
6508 fi
6509 if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
6510 DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
6511 echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
6512 fi
6513 fi
6514
6515 if [ ! -d /usr/local/share ]; then
6516 mkdir -p /usr/local/share
6517 fi
6518
6519 tee /usr/local/share/docker-init.sh > /dev/null \
6520 << EOF
6521 #!/bin/sh
6522 #-------------------------------------------------------------------------------------------------------------
6523 # Copyright (c) Microsoft Corporation. All rights reserved.
6524 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
6525 #-------------------------------------------------------------------------------------------------------------
6526
6527 set -e
6528
6529 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
6530 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
6531 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
6532 EOF
6533
6534 tee -a /usr/local/share/docker-init.sh > /dev/null \
6535 << 'EOF'
6536 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
6537 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
6538 find /run /var/run -iname 'docker*.pid' -delete || :
6539 find /run /var/run -iname 'container*.pid' -delete || :
6540
6541 # -- Start: dind wrapper script --
6542 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
6543
6544 export container=docker
6545
6546 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
6547 mount -t securityfs none /sys/kernel/security || {
6548 echo >&2 'Could not mount /sys/kernel/security.'
6549 echo >&2 'AppArmor detection and --privileged mode might break.'
6550 }
6551 fi
6552
6553 # Mount /tmp (conditionally)
6554 if ! mountpoint -q /tmp; then
6555 mount -t tmpfs none /tmp
6556 fi
6557
6558 set_cgroup_nesting()
6559 {
6560 # cgroup v2: enable nesting
6561 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
6562 # move the processes from the root group to the /init group,
6563 # otherwise writing subtree_control fails with EBUSY.
6564 # An error during moving non-existent process (i.e., "cat") is ignored.
6565 mkdir -p /sys/fs/cgroup/init
6566 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
6567 # enable controllers
6568 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
6569 > /sys/fs/cgroup/cgroup.subtree_control
6570 fi
6571 }
6572
6573 # Set cgroup nesting, retrying if necessary
6574 retry_cgroup_nesting=0
6575
6576 until [ "${retry_cgroup_nesting}" -eq "5" ];
6577 do
6578 set +e
6579 set_cgroup_nesting
6580
6581 if [ $? -ne 0 ]; then
6582 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
6583 else
6584 break
6585 fi
6586
6587 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
6588 set -e
6589 done
6590
6591 # -- End: dind wrapper script --
6592
6593 # Handle DNS
6594 set +e
6595 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
6596 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
6597 then
6598 echo "Setting dockerd Azure DNS."
6599 CUSTOMDNS="--dns 168.63.129.16"
6600 else
6601 echo "Not setting dockerd DNS manually."
6602 CUSTOMDNS=""
6603 fi
6604 set -e
6605
6606 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
6607 then
6608 DEFAULT_ADDRESS_POOL=""
6609 else
6610 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
6611 fi
6612
6613 # Start docker/moby engine
6614 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
6615 INNEREOF
6616 )"
6617
6618 sudo_if() {
6619 COMMAND="$*"
6620
6621 if [ "$(id -u)" -ne 0 ]; then
6622 sudo $COMMAND
6623 else
6624 $COMMAND
6625 fi
6626 }
6627
6628 retry_docker_start_count=0
6629 docker_ok="false"
6630
6631 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
6632 do
6633 # Start using sudo if not invoked as root
6634 if [ "$(id -u)" -ne 0 ]; then
6635 sudo /bin/sh -c "${dockerd_start}"
6636 else
6637 eval "${dockerd_start}"
6638 fi
6639
6640 retry_count=0
6641 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
6642 do
6643 sleep 1s
6644 set +e
6645 docker info > /dev/null 2>&1 && docker_ok="true"
6646 set -e
6647
6648 retry_count=`expr $retry_count + 1`
6649 done
6650
6651 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
6652 echo "(*) Failed to start docker, retrying..."
6653 set +e
6654 sudo_if pkill dockerd
6655 sudo_if pkill containerd
6656 set -e
6657 fi
6658
6659 retry_docker_start_count=`expr $retry_docker_start_count + 1`
6660 done
6661
6662 # Execute whatever commands were passed in (if any). This allows us
6663 # to set this script to ENTRYPOINT while still executing the default CMD.
6664 exec "$@"
6665 EOF
6666
6667 chmod +x /usr/local/share/docker-init.sh
6668 chown ${USERNAME}:root /usr/local/share/docker-init.sh
6669
6670 # Clean up
6671 rm -rf /var/lib/apt/lists/*
6672
6673 echo 'docker-in-docker-debian script has completed!'"#),
6674 ]).await;
6675
6676 return Ok(http::Response::builder()
6677 .status(200)
6678 .body(AsyncBody::from(response))
6679 .unwrap());
6680 }
6681 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
6682 let response = r#"
6683 {
6684 "schemaVersion": 2,
6685 "mediaType": "application/vnd.oci.image.manifest.v1+json",
6686 "config": {
6687 "mediaType": "application/vnd.devcontainers",
6688 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
6689 "size": 2
6690 },
6691 "layers": [
6692 {
6693 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
6694 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
6695 "size": 20992,
6696 "annotations": {
6697 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
6698 }
6699 }
6700 ],
6701 "annotations": {
6702 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
6703 "com.github.package.type": "devcontainer_feature"
6704 }
6705 }
6706 "#;
6707
6708 return Ok(http::Response::builder()
6709 .status(200)
6710 .body(http_client::AsyncBody::from(response))
6711 .unwrap());
6712 }
6713 if parts.uri.path()
6714 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
6715 {
6716 let response = build_tarball(vec![
6717 ("./devcontainer-feature.json", r#"
6718 {
6719 "id": "go",
6720 "version": "1.3.3",
6721 "name": "Go",
6722 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
6723 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
6724 "options": {
6725 "version": {
6726 "type": "string",
6727 "proposals": [
6728 "latest",
6729 "none",
6730 "1.24",
6731 "1.23"
6732 ],
6733 "default": "latest",
6734 "description": "Select or enter a Go version to install"
6735 },
6736 "golangciLintVersion": {
6737 "type": "string",
6738 "default": "latest",
6739 "description": "Version of golangci-lint to install"
6740 }
6741 },
6742 "init": true,
6743 "customizations": {
6744 "vscode": {
6745 "extensions": [
6746 "golang.Go"
6747 ],
6748 "settings": {
6749 "github.copilot.chat.codeGeneration.instructions": [
6750 {
6751 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
6752 }
6753 ]
6754 }
6755 }
6756 },
6757 "containerEnv": {
6758 "GOROOT": "/usr/local/go",
6759 "GOPATH": "/go",
6760 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
6761 },
6762 "capAdd": [
6763 "SYS_PTRACE"
6764 ],
6765 "securityOpt": [
6766 "seccomp=unconfined"
6767 ],
6768 "installsAfter": [
6769 "ghcr.io/devcontainers/features/common-utils"
6770 ]
6771 }
6772 "#),
6773 ("./install.sh", r#"
6774 #!/usr/bin/env bash
6775 #-------------------------------------------------------------------------------------------------------------
6776 # Copyright (c) Microsoft Corporation. All rights reserved.
6777 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
6778 #-------------------------------------------------------------------------------------------------------------
6779 #
6780 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
6781 # Maintainer: The VS Code and Codespaces Teams
6782
6783 TARGET_GO_VERSION="${VERSION:-"latest"}"
6784 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
6785
6786 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
6787 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
6788 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
6789 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
6790
6791 # https://www.google.com/linuxrepositories/
6792 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
6793
6794 set -e
6795
6796 if [ "$(id -u)" -ne 0 ]; then
6797 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
6798 exit 1
6799 fi
6800
6801 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
6802 . /etc/os-release
6803 # Get an adjusted ID independent of distro variants
6804 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
6805 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
6806 ADJUSTED_ID="debian"
6807 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
6808 ADJUSTED_ID="rhel"
6809 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
6810 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
6811 else
6812 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
6813 fi
6814 else
6815 echo "Linux distro ${ID} not supported."
6816 exit 1
6817 fi
6818
6819 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
6820 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
6821 # Update the repo files to reference vault.centos.org.
6822 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
6823 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
6824 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
6825 fi
6826
6827 # Setup INSTALL_CMD & PKG_MGR_CMD
6828 if type apt-get > /dev/null 2>&1; then
6829 PKG_MGR_CMD=apt-get
6830 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
6831 elif type microdnf > /dev/null 2>&1; then
6832 PKG_MGR_CMD=microdnf
6833 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6834 elif type dnf > /dev/null 2>&1; then
6835 PKG_MGR_CMD=dnf
6836 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
6837 else
6838 PKG_MGR_CMD=yum
6839 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
6840 fi
6841
6842 # Clean up
6843 clean_up() {
6844 case ${ADJUSTED_ID} in
6845 debian)
6846 rm -rf /var/lib/apt/lists/*
6847 ;;
6848 rhel)
6849 rm -rf /var/cache/dnf/* /var/cache/yum/*
6850 rm -rf /tmp/yum.log
6851 rm -rf ${GPG_INSTALL_PATH}
6852 ;;
6853 esac
6854 }
6855 clean_up
6856
6857
6858 # Figure out correct version of a three part version number is not passed
6859 find_version_from_git_tags() {
6860 local variable_name=$1
6861 local requested_version=${!variable_name}
6862 if [ "${requested_version}" = "none" ]; then return; fi
6863 local repository=$2
6864 local prefix=${3:-"tags/v"}
6865 local separator=${4:-"."}
6866 local last_part_optional=${5:-"false"}
6867 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
6868 local escaped_separator=${separator//./\\.}
6869 local last_part
6870 if [ "${last_part_optional}" = "true" ]; then
6871 last_part="(${escaped_separator}[0-9]+)?"
6872 else
6873 last_part="${escaped_separator}[0-9]+"
6874 fi
6875 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
6876 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
6877 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
6878 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
6879 else
6880 set +e
6881 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
6882 set -e
6883 fi
6884 fi
6885 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
6886 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
6887 exit 1
6888 fi
6889 echo "${variable_name}=${!variable_name}"
6890 }
6891
6892 pkg_mgr_update() {
6893 case $ADJUSTED_ID in
6894 debian)
6895 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
6896 echo "Running apt-get update..."
6897 ${PKG_MGR_CMD} update -y
6898 fi
6899 ;;
6900 rhel)
6901 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6902 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6903 echo "Running ${PKG_MGR_CMD} makecache ..."
6904 ${PKG_MGR_CMD} makecache
6905 fi
6906 else
6907 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6908 echo "Running ${PKG_MGR_CMD} check-update ..."
6909 set +e
6910 ${PKG_MGR_CMD} check-update
6911 rc=$?
6912 if [ $rc != 0 ] && [ $rc != 100 ]; then
6913 exit 1
6914 fi
6915 set -e
6916 fi
6917 fi
6918 ;;
6919 esac
6920 }
6921
6922 # Checks if packages are installed and installs them if not
6923 check_packages() {
6924 case ${ADJUSTED_ID} in
6925 debian)
6926 if ! dpkg -s "$@" > /dev/null 2>&1; then
6927 pkg_mgr_update
6928 ${INSTALL_CMD} "$@"
6929 fi
6930 ;;
6931 rhel)
6932 if ! rpm -q "$@" > /dev/null 2>&1; then
6933 pkg_mgr_update
6934 ${INSTALL_CMD} "$@"
6935 fi
6936 ;;
6937 esac
6938 }
6939
6940 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6941 rm -f /etc/profile.d/00-restore-env.sh
6942 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6943 chmod +x /etc/profile.d/00-restore-env.sh
6944
6945 # Some distributions do not install awk by default (e.g. Mariner)
6946 if ! type awk >/dev/null 2>&1; then
6947 check_packages awk
6948 fi
6949
6950 # Determine the appropriate non-root user
6951 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6952 USERNAME=""
6953 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6954 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6955 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6956 USERNAME=${CURRENT_USER}
6957 break
6958 fi
6959 done
6960 if [ "${USERNAME}" = "" ]; then
6961 USERNAME=root
6962 fi
6963 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6964 USERNAME=root
6965 fi
6966
6967 export DEBIAN_FRONTEND=noninteractive
6968
6969 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6970
6971 if [ $ADJUSTED_ID = "debian" ]; then
6972 check_packages g++ libc6-dev
6973 else
6974 check_packages gcc-c++ glibc-devel
6975 fi
6976 # Install curl, git, other dependencies if missing
6977 if ! type curl > /dev/null 2>&1; then
6978 check_packages curl
6979 fi
6980 if ! type git > /dev/null 2>&1; then
6981 check_packages git
6982 fi
6983 # Some systems, e.g. Mariner, still a few more packages
6984 if ! type as > /dev/null 2>&1; then
6985 check_packages binutils
6986 fi
6987 if ! [ -f /usr/include/linux/errno.h ]; then
6988 check_packages kernel-headers
6989 fi
6990 # Minimal RHEL install may need findutils installed
6991 if ! [ -f /usr/bin/find ]; then
6992 check_packages findutils
6993 fi
6994
6995 # Get closest match for version number specified
6996 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6997
6998 architecture="$(uname -m)"
6999 case $architecture in
7000 x86_64) architecture="amd64";;
7001 aarch64 | armv8*) architecture="arm64";;
7002 aarch32 | armv7* | armvhf*) architecture="armv6l";;
7003 i?86) architecture="386";;
7004 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
7005 esac
7006
7007 # Install Go
7008 umask 0002
7009 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
7010 groupadd -r golang
7011 fi
7012 usermod -a -G golang "${USERNAME}"
7013 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7014
7015 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
7016 # Use a temporary location for gpg keys to avoid polluting image
7017 export GNUPGHOME="/tmp/tmp-gnupg"
7018 mkdir -p ${GNUPGHOME}
7019 chmod 700 ${GNUPGHOME}
7020 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
7021 gpg -q --import /tmp/tmp-gnupg/golang_key
7022 echo "Downloading Go ${TARGET_GO_VERSION}..."
7023 set +e
7024 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
7025 exit_code=$?
7026 set -e
7027 if [ "$exit_code" != "0" ]; then
7028 echo "(!) Download failed."
7029 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
7030 set +e
7031 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
7032 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
7033 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
7034 # Handle Go's odd version pattern where "0" releases omit the last part
7035 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
7036 ((minor=minor-1))
7037 TARGET_GO_VERSION="${major}.${minor}"
7038 # Look for latest version from previous minor release
7039 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
7040 else
7041 ((breakfix=breakfix-1))
7042 if [ "${breakfix}" = "0" ]; then
7043 TARGET_GO_VERSION="${major}.${minor}"
7044 else
7045 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
7046 fi
7047 fi
7048 set -e
7049 echo "Trying ${TARGET_GO_VERSION}..."
7050 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
7051 fi
7052 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
7053 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
7054 echo "Extracting Go ${TARGET_GO_VERSION}..."
7055 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
7056 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
7057 else
7058 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
7059 fi
7060
7061 # Install Go tools that are isImportant && !replacedByGopls based on
7062 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
7063 GO_TOOLS="\
7064 golang.org/x/tools/gopls@latest \
7065 honnef.co/go/tools/cmd/staticcheck@latest \
7066 golang.org/x/lint/golint@latest \
7067 github.com/mgechev/revive@latest \
7068 github.com/go-delve/delve/cmd/dlv@latest \
7069 github.com/fatih/gomodifytags@latest \
7070 github.com/haya14busa/goplay/cmd/goplay@latest \
7071 github.com/cweill/gotests/gotests@latest \
7072 github.com/josharian/impl@latest"
7073
7074 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
7075 echo "Installing common Go tools..."
7076 export PATH=${TARGET_GOROOT}/bin:${PATH}
7077 export GOPATH=/tmp/gotools
7078 export GOCACHE="${GOPATH}/cache"
7079
7080 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
7081 cd "${GOPATH}"
7082
7083 # Use go get for versions of go under 1.16
7084 go_install_command=install
7085 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
7086 export GO111MODULE=on
7087 go_install_command=get
7088 echo "Go version < 1.16, using go get."
7089 fi
7090
7091 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
7092
7093 # Move Go tools into path
7094 if [ -d "${GOPATH}/bin" ]; then
7095 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
7096 fi
7097
7098 # Install golangci-lint from precompiled binaries
7099 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
7100 echo "Installing golangci-lint latest..."
7101 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
7102 sh -s -- -b "${TARGET_GOPATH}/bin"
7103 else
7104 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
7105 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
7106 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
7107 fi
7108
7109 # Remove Go tools temp directory
7110 rm -rf "${GOPATH}"
7111 fi
7112
7113
7114 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7115 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
7116 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
7117 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
7118
7119 # Clean up
7120 clean_up
7121
7122 echo "Done!"
7123 "#),
7124 ])
7125 .await;
7126 return Ok(http::Response::builder()
7127 .status(200)
7128 .body(AsyncBody::from(response))
7129 .unwrap());
7130 }
7131 if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
7132 let response = r#"
7133 {
7134 "schemaVersion": 2,
7135 "mediaType": "application/vnd.oci.image.manifest.v1+json",
7136 "config": {
7137 "mediaType": "application/vnd.devcontainers",
7138 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
7139 "size": 2
7140 },
7141 "layers": [
7142 {
7143 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
7144 "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
7145 "size": 19968,
7146 "annotations": {
7147 "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
7148 }
7149 }
7150 ],
7151 "annotations": {
7152 "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
7153 "com.github.package.type": "devcontainer_feature"
7154 }
7155 }"#;
7156 return Ok(http::Response::builder()
7157 .status(200)
7158 .body(AsyncBody::from(response))
7159 .unwrap());
7160 }
7161 if parts.uri.path()
7162 == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
7163 {
7164 let response = build_tarball(vec![
7165 (
7166 "./devcontainer-feature.json",
7167 r#"
7168{
7169 "id": "aws-cli",
7170 "version": "1.1.3",
7171 "name": "AWS CLI",
7172 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
7173 "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
7174 "options": {
7175 "version": {
7176 "type": "string",
7177 "proposals": [
7178 "latest"
7179 ],
7180 "default": "latest",
7181 "description": "Select or enter an AWS CLI version."
7182 },
7183 "verbose": {
7184 "type": "boolean",
7185 "default": true,
7186 "description": "Suppress verbose output."
7187 }
7188 },
7189 "customizations": {
7190 "vscode": {
7191 "extensions": [
7192 "AmazonWebServices.aws-toolkit-vscode"
7193 ],
7194 "settings": {
7195 "github.copilot.chat.codeGeneration.instructions": [
7196 {
7197 "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
7198 }
7199 ]
7200 }
7201 }
7202 },
7203 "installsAfter": [
7204 "ghcr.io/devcontainers/features/common-utils"
7205 ]
7206}
7207 "#,
7208 ),
7209 (
7210 "./install.sh",
7211 r#"#!/usr/bin/env bash
7212 #-------------------------------------------------------------------------------------------------------------
7213 # Copyright (c) Microsoft Corporation. All rights reserved.
7214 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7215 #-------------------------------------------------------------------------------------------------------------
7216 #
7217 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
7218 # Maintainer: The VS Code and Codespaces Teams
7219
7220 set -e
7221
7222 # Clean up
7223 rm -rf /var/lib/apt/lists/*
7224
7225 VERSION=${VERSION:-"latest"}
7226 VERBOSE=${VERBOSE:-"true"}
7227
7228 AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
7229 AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----
7230
7231 mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
7232 ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
7233 PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
7234 TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
7235 gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
7236 C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
7237 94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
7238 lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
7239 fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
7240 EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
7241 XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
7242 tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
7243 Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
7244 FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
7245 yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
7246 MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
7247 au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
7248 ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
7249 hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
7250 tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
7251 QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
7252 RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
7253 rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
7254 H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
7255 YLZATHZKTJyiqA==
7256 =vYOk
7257 -----END PGP PUBLIC KEY BLOCK-----"
7258
7259 if [ "$(id -u)" -ne 0 ]; then
7260 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
7261 exit 1
7262 fi
7263
7264 apt_get_update()
7265 {
7266 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
7267 echo "Running apt-get update..."
7268 apt-get update -y
7269 fi
7270 }
7271
7272 # Checks if packages are installed and installs them if not
7273 check_packages() {
7274 if ! dpkg -s "$@" > /dev/null 2>&1; then
7275 apt_get_update
7276 apt-get -y install --no-install-recommends "$@"
7277 fi
7278 }
7279
7280 export DEBIAN_FRONTEND=noninteractive
7281
7282 check_packages curl ca-certificates gpg dirmngr unzip bash-completion less
7283
7284 verify_aws_cli_gpg_signature() {
7285 local filePath=$1
7286 local sigFilePath=$2
7287 local awsGpgKeyring=aws-cli-public-key.gpg
7288
7289 echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
7290 gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
7291 local status=$?
7292
7293 rm "./${awsGpgKeyring}"
7294
7295 return ${status}
7296 }
7297
7298 install() {
7299 local scriptZipFile=awscli.zip
7300 local scriptSigFile=awscli.sig
7301
7302 # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
7303 if [ "${VERSION}" != "latest" ]; then
7304 local versionStr=-${VERSION}
7305 fi
7306 architecture=$(dpkg --print-architecture)
7307 case "${architecture}" in
7308 amd64) architectureStr=x86_64 ;;
7309 arm64) architectureStr=aarch64 ;;
7310 *)
7311 echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
7312 exit 1
7313 esac
7314 local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
7315 curl "${scriptUrl}" -o "${scriptZipFile}"
7316 curl "${scriptUrl}.sig" -o "${scriptSigFile}"
7317
7318 verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
7319 if (( $? > 0 )); then
7320 echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
7321 exit 1
7322 fi
7323
7324 if [ "${VERBOSE}" = "false" ]; then
7325 unzip -q "${scriptZipFile}"
7326 else
7327 unzip "${scriptZipFile}"
7328 fi
7329
7330 ./aws/install
7331
7332 # kubectl bash completion
7333 mkdir -p /etc/bash_completion.d
7334 cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws
7335
7336 # kubectl zsh completion
7337 if [ -e "${USERHOME}/.oh-my-zsh" ]; then
7338 mkdir -p "${USERHOME}/.oh-my-zsh/completions"
7339 cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
7340 chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
7341 fi
7342
7343 rm -rf ./aws
7344 }
7345
7346 echo "(*) Installing AWS CLI..."
7347
7348 install
7349
7350 # Clean up
7351 rm -rf /var/lib/apt/lists/*
7352
7353 echo "Done!""#,
7354 ),
7355 ("./scripts/", r#""#),
7356 (
7357 "./scripts/fetch-latest-completer-scripts.sh",
7358 r#"
7359 #!/bin/bash
7360 #-------------------------------------------------------------------------------------------------------------
7361 # Copyright (c) Microsoft Corporation. All rights reserved.
7362 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
7363 #-------------------------------------------------------------------------------------------------------------
7364 #
7365 # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
7366 # Maintainer: The Dev Container spec maintainers
7367 #
7368 # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
7369 #
7370 COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
7371 BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
7372 ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"
7373
7374 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
7375 chmod +x "$BASH_COMPLETER_SCRIPT"
7376
7377 wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
7378 chmod +x "$ZSH_COMPLETER_SCRIPT"
7379 "#,
7380 ),
7381 ("./scripts/vendor/", r#""#),
7382 (
7383 "./scripts/vendor/aws_bash_completer",
7384 r#"
7385 # Typically that would be added under one of the following paths:
7386 # - /etc/bash_completion.d
7387 # - /usr/local/etc/bash_completion.d
7388 # - /usr/share/bash-completion/completions
7389
7390 complete -C aws_completer aws
7391 "#,
7392 ),
7393 (
7394 "./scripts/vendor/aws_zsh_completer.sh",
7395 r#"
7396 # Source this file to activate auto completion for zsh using the bash
7397 # compatibility helper. Make sure to run `compinit` before, which should be
7398 # given usually.
7399 #
7400 # % source /path/to/zsh_complete.sh
7401 #
7402 # Typically that would be called somewhere in your .zshrc.
7403 #
7404 # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
7405 # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7406 #
7407 # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
7408 #
7409 # zsh releases prior to that version do not export the required env variables!
7410
7411 autoload -Uz bashcompinit
7412 bashcompinit -i
7413
7414 _bash_complete() {
7415 local ret=1
7416 local -a suf matches
7417 local -x COMP_POINT COMP_CWORD
7418 local -a COMP_WORDS COMPREPLY BASH_VERSINFO
7419 local -x COMP_LINE="$words"
7420 local -A savejobstates savejobtexts
7421
7422 (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
7423 (( COMP_CWORD = CURRENT - 1))
7424 COMP_WORDS=( $words )
7425 BASH_VERSINFO=( 2 05b 0 1 release )
7426
7427 savejobstates=( ${(kv)jobstates} )
7428 savejobtexts=( ${(kv)jobtexts} )
7429
7430 [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )
7431
7432 matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )
7433
7434 if [[ -n $matches ]]; then
7435 if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
7436 compset -P '*/' && matches=( ${matches##*/} )
7437 compset -S '/*' && matches=( ${matches%%/*} )
7438 compadd -Q -f "${suf[@]}" -a matches && ret=0
7439 else
7440 compadd -Q "${suf[@]}" -a matches && ret=0
7441 fi
7442 fi
7443
7444 if (( ret )); then
7445 if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
7446 _default "${suf[@]}" && ret=0
7447 elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
7448 _directories "${suf[@]}" && ret=0
7449 fi
7450 fi
7451
7452 return ret
7453 }
7454
7455 complete -C aws_completer aws
7456 "#,
7457 ),
7458 ]).await;
7459
7460 return Ok(http::Response::builder()
7461 .status(200)
7462 .body(AsyncBody::from(response))
7463 .unwrap());
7464 }
7465
7466 Ok(http::Response::builder()
7467 .status(404)
7468 .body(http_client::AsyncBody::default())
7469 .unwrap())
7470 })
7471 }
7472}