1use std::{
2 collections::HashMap,
3 fmt::Debug,
4 hash::{DefaultHasher, Hash, Hasher},
5 path::{Path, PathBuf},
6 sync::Arc,
7};
8
9use fs::Fs;
10use http_client::HttpClient;
11use util::{ResultExt, command::Command};
12
13use crate::{
14 DevContainerConfig, DevContainerContext,
15 command_json::{CommandRunner, DefaultCommandRunner},
16 devcontainer_api::{DevContainerError, DevContainerUp},
17 devcontainer_json::{
18 DevContainer, DevContainerBuildType, FeatureOptions, ForwardPort, MountDefinition,
19 deserialize_devcontainer_json,
20 },
21 docker::{
22 Docker, DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
23 DockerComposeVolume, DockerInspect, DockerPs, get_remote_dir_from_config,
24 },
25 features::{DevContainerFeatureJson, FeatureManifest, parse_oci_feature_ref},
26 get_oci_token,
27 oci::{TokenResponse, download_oci_tarball, get_oci_manifest},
28 safe_id_lower,
29};
30
/// Tracks how far the raw `devcontainer.json` has been processed.
///
/// Several methods require variable expansion to have happened first;
/// keeping the two stages as distinct variants lets those methods reject a
/// config that has merely been deserialized (see `parse_nonremote_vars`).
enum ConfigStatus {
    /// Parsed straight from JSON; substitution variables are still present.
    Deserialized(DevContainer),
    /// Variables have been expanded via `parse_nonremote_vars`.
    VariableParsed(DevContainer),
}
35
/// A docker-compose project as resolved on disk: the ordered list of compose
/// files that will be passed to docker compose, plus the merged configuration
/// the docker client produced for them.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub(crate) struct DockerComposeResources {
    // Absolute compose file paths: the config's own files first, with any
    // generated override files appended later in the build pipeline.
    files: Vec<PathBuf>,
    // Merged compose configuration deserialized from the docker client.
    config: DockerComposeConfig,
}
41
/// Everything required to take a project's devcontainer config from raw JSON
/// to a running container: the parsed config, the clients used to talk to
/// docker / HTTP / the filesystem, and state accumulated while downloading
/// features and preparing image builds.
struct DevContainerManifest {
    // Used to fetch OCI feature tokens, manifests and tarballs.
    http_client: Arc<dyn HttpClient>,
    // Filesystem abstraction for reading configs and writing build artifacts.
    fs: Arc<dyn Fs>,
    // Docker operations (inspect, compose config/build, ...).
    docker_client: Arc<dyn DockerClient>,
    command_runner: Arc<dyn CommandRunner>,
    // The devcontainer.json text exactly as loaded from disk.
    raw_config: String,
    // Parsed config together with its variable-expansion stage.
    config: ConfigStatus,
    // Local environment captured for ${localEnv:...} substitution.
    local_environment: HashMap<String, String>,
    // Root of the project on the host machine.
    local_project_directory: PathBuf,
    // Directory containing the devcontainer config file.
    config_directory: PathBuf,
    // File name of the config inside `config_directory`.
    file_name: String,
    // Inspect data for the resolved base image; populated by
    // `download_feature_and_dockerfile_resources`.
    root_image: Option<DockerInspect>,
    // Paths/tags for the features-extended image build; populated by
    // `download_feature_and_dockerfile_resources`.
    features_build_info: Option<FeaturesBuildInfo>,
    // Downloaded feature manifests, in install order.
    features: Vec<FeatureManifest>,
}
/// Default parent directory for project workspaces inside the container
/// (not referenced in this portion of the file — presumably used by the
/// remote-directory helpers elsewhere).
const DEFAULT_REMOTE_PROJECT_DIR: &str = "/workspaces/";
58impl DevContainerManifest {
59 async fn new(
60 context: &DevContainerContext,
61 environment: HashMap<String, String>,
62 docker_client: Arc<dyn DockerClient>,
63 command_runner: Arc<dyn CommandRunner>,
64 local_config: DevContainerConfig,
65 local_project_path: &Path,
66 ) -> Result<Self, DevContainerError> {
67 let config_path = local_project_path.join(local_config.config_path.clone());
68 log::debug!("parsing devcontainer json found in {:?}", &config_path);
69 let devcontainer_contents = context.fs.load(&config_path).await.map_err(|e| {
70 log::error!("Unable to read devcontainer contents: {e}");
71 DevContainerError::DevContainerParseFailed
72 })?;
73
74 let devcontainer = deserialize_devcontainer_json(&devcontainer_contents)?;
75
76 let devcontainer_directory = config_path.parent().ok_or_else(|| {
77 log::error!("Dev container file should be in a directory");
78 DevContainerError::NotInValidProject
79 })?;
80 let file_name = config_path
81 .file_name()
82 .and_then(|f| f.to_str())
83 .ok_or_else(|| {
84 log::error!("Dev container file has no file name, or is invalid unicode");
85 DevContainerError::DevContainerParseFailed
86 })?;
87
88 Ok(Self {
89 fs: context.fs.clone(),
90 http_client: context.http_client.clone(),
91 docker_client,
92 command_runner,
93 raw_config: devcontainer_contents,
94 config: ConfigStatus::Deserialized(devcontainer),
95 local_project_directory: local_project_path.to_path_buf(),
96 local_environment: environment,
97 config_directory: devcontainer_directory.to_path_buf(),
98 file_name: file_name.to_string(),
99 root_image: None,
100 features_build_info: None,
101 features: Vec::new(),
102 })
103 }
104
105 fn devcontainer_id(&self) -> String {
106 let mut labels = self.identifying_labels();
107 labels.sort_by_key(|(key, _)| *key);
108
109 let mut hasher = DefaultHasher::new();
110 for (key, value) in &labels {
111 key.hash(&mut hasher);
112 value.hash(&mut hasher);
113 }
114
115 format!("{:016x}", hasher.finish())
116 }
117
118 fn identifying_labels(&self) -> Vec<(&str, String)> {
119 let labels = vec![
120 (
121 "devcontainer.local_folder",
122 (self.local_project_directory.display()).to_string(),
123 ),
124 (
125 "devcontainer.config_file",
126 (self.config_file().display()).to_string(),
127 ),
128 ];
129 labels
130 }
131
132 fn parse_nonremote_vars_for_content(&self, content: &str) -> Result<String, DevContainerError> {
133 let mut replaced_content = content
134 .replace("${devcontainerId}", &self.devcontainer_id())
135 .replace(
136 "${containerWorkspaceFolderBasename}",
137 &self.remote_workspace_base_name().unwrap_or_default(),
138 )
139 .replace(
140 "${localWorkspaceFolderBasename}",
141 &self.local_workspace_base_name()?,
142 )
143 .replace(
144 "${containerWorkspaceFolder}",
145 &self
146 .remote_workspace_folder()
147 .map(|path| path.display().to_string())
148 .unwrap_or_default()
149 .replace('\\', "/"),
150 )
151 .replace(
152 "${localWorkspaceFolder}",
153 &self.local_workspace_folder().replace('\\', "/"),
154 );
155 for (k, v) in &self.local_environment {
156 let find = format!("${{localEnv:{k}}}");
157 replaced_content = replaced_content.replace(&find, &v.replace('\\', "/"));
158 }
159
160 Ok(replaced_content)
161 }
162
163 fn parse_nonremote_vars(&mut self) -> Result<(), DevContainerError> {
164 let replaced_content = self.parse_nonremote_vars_for_content(&self.raw_config)?;
165 let parsed_config = deserialize_devcontainer_json(&replaced_content)?;
166
167 self.config = ConfigStatus::VariableParsed(parsed_config);
168
169 Ok(())
170 }
171
    /// Builds the environment for processes run inside the container: the
    /// container's own env (minus HOME) overlaid with the config's
    /// `remoteEnv`, whose values may reference `${containerEnv:<VAR>}`.
    fn runtime_remote_env(
        &self,
        container_env: &HashMap<String, String>,
    ) -> Result<HashMap<String, String>, DevContainerError> {
        let mut merged_remote_env = container_env.clone();
        // HOME is user-specific, and we will often not run as the image user
        merged_remote_env.remove("HOME");
        if let Some(remote_env) = self.dev_container().remote_env.clone() {
            // Round-trip through a serialized JSON string so
            // ${containerEnv:...} placeholders in any value can be
            // substituted in a single pass.
            // NOTE(review): substitution happens on the serialized JSON
            // text, so a container env value containing characters that need
            // JSON escaping (quotes, backslashes) would corrupt the document
            // and fail the re-parse below — confirm inputs are benign.
            let mut raw = serde_json_lenient::to_string(&remote_env).map_err(|e| {
                log::error!(
                    "Unexpected error serializing dev container remote_env: {e} - {:?}",
                    remote_env
                );
                DevContainerError::DevContainerParseFailed
            })?;
            for (k, v) in container_env {
                raw = raw.replace(&format!("${{containerEnv:{k}}}"), v);
            }
            let reserialized: HashMap<String, String> = serde_json_lenient::from_str(&raw)
                .map_err(|e| {
                    log::error!(
                        "Unexpected error reserializing dev container remote env: {e} - {:?}",
                        &raw
                    );
                    DevContainerError::DevContainerParseFailed
                })?;
            // remoteEnv entries win over the container env on key collisions.
            for (k, v) in reserialized {
                merged_remote_env.insert(k, v);
            }
        }
        Ok(merged_remote_env)
    }
204
205 fn config_file(&self) -> PathBuf {
206 self.config_directory.join(&self.file_name)
207 }
208
209 fn dev_container(&self) -> &DevContainer {
210 match &self.config {
211 ConfigStatus::Deserialized(dev_container) => dev_container,
212 ConfigStatus::VariableParsed(dev_container) => dev_container,
213 }
214 }
215
216 async fn dockerfile_location(&self) -> Option<PathBuf> {
217 let dev_container = self.dev_container();
218 match dev_container.build_type() {
219 DevContainerBuildType::Image => None,
220 DevContainerBuildType::Dockerfile => dev_container
221 .build
222 .as_ref()
223 .map(|build| self.config_directory.join(&build.dockerfile)),
224 DevContainerBuildType::DockerCompose => {
225 let Ok(docker_compose_manifest) = self.docker_compose_manifest().await else {
226 return None;
227 };
228 let Ok((_, main_service)) = find_primary_service(&docker_compose_manifest, self)
229 else {
230 return None;
231 };
232 main_service
233 .build
234 .and_then(|b| b.dockerfile)
235 .map(|dockerfile| self.config_directory.join(dockerfile))
236 }
237 DevContainerBuildType::None => None,
238 }
239 }
240
241 fn generate_features_image_tag(&self, dockerfile_build_path: String) -> String {
242 let mut hasher = DefaultHasher::new();
243 let prefix = match &self.dev_container().name {
244 Some(name) => &safe_id_lower(name),
245 None => "zed-dc",
246 };
247 let prefix = prefix.get(..6).unwrap_or(prefix);
248
249 dockerfile_build_path.hash(&mut hasher);
250
251 let hash = hasher.finish();
252 format!("{}-{:x}-features", prefix, hash)
253 }
254
255 /// Gets the base image from the devcontainer with the following precedence:
256 /// - The devcontainer image if an image is specified
257 /// - The image sourced in the Dockerfile if a Dockerfile is specified
258 /// - The image sourced in the docker-compose main service, if one is specified
259 /// - The image sourced in the docker-compose main service dockerfile, if one is specified
260 /// If no such image is available, return an error
261 async fn get_base_image_from_config(&self) -> Result<String, DevContainerError> {
262 if let Some(image) = &self.dev_container().image {
263 return Ok(image.to_string());
264 }
265 if let Some(dockerfile) = self.dev_container().build.as_ref().map(|b| &b.dockerfile) {
266 let dockerfile_contents = self
267 .fs
268 .load(&self.config_directory.join(dockerfile))
269 .await
270 .map_err(|e| {
271 log::error!("Error reading dockerfile: {e}");
272 DevContainerError::DevContainerParseFailed
273 })?;
274 return image_from_dockerfile(self, dockerfile_contents);
275 }
276 if self.dev_container().docker_compose_file.is_some() {
277 let docker_compose_manifest = self.docker_compose_manifest().await?;
278 let (_, main_service) = find_primary_service(&docker_compose_manifest, &self)?;
279
280 if let Some(dockerfile) = main_service
281 .build
282 .as_ref()
283 .and_then(|b| b.dockerfile.as_ref())
284 {
285 let dockerfile_contents = self
286 .fs
287 .load(&self.config_directory.join(dockerfile))
288 .await
289 .map_err(|e| {
290 log::error!("Error reading dockerfile: {e}");
291 DevContainerError::DevContainerParseFailed
292 })?;
293 return image_from_dockerfile(self, dockerfile_contents);
294 }
295 if let Some(image) = &main_service.image {
296 return Ok(image.to_string());
297 }
298
299 log::error!("No valid base image found in docker-compose configuration");
300 return Err(DevContainerError::DevContainerParseFailed);
301 }
302 log::error!("No valid base image found in dev container configuration");
303 Err(DevContainerError::DevContainerParseFailed)
304 }
305
    /// Downloads the OCI feature layers and writes the on-disk build context
    /// (builtin env file, per-feature install wrappers, extended Dockerfile)
    /// needed to build the features-extended image.
    ///
    /// On success populates `self.features`, `self.root_image` and
    /// `self.features_build_info`. Requires `parse_nonremote_vars` to have
    /// run first.
    async fn download_feature_and_dockerfile_resources(&mut self) -> Result<(), DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet download resources"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };
        // Resolve and inspect the base image up front; its config determines
        // the container/remote users further below.
        let root_image_tag = self.get_base_image_from_config().await?;
        let root_image = self.docker_client.inspect(&root_image_tag).await?;

        // A plain image with no features needs no extra build context.
        // NOTE(review): this early return leaves `self.root_image` unset —
        // confirm that callers on this path don't read it.
        if dev_container.build_type() == DevContainerBuildType::Image
            && !dev_container.has_features()
        {
            log::debug!("No resources to download. Proceeding with just the image");
            return Ok(());
        }

        // Timestamped scratch directory so successive builds don't collide;
        // `empty-folder` later serves as an empty docker build context.
        let temp_base = std::env::temp_dir().join("devcontainer-zed");
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .map(|d| d.as_millis())
            .unwrap_or(0);

        let features_content_dir = temp_base.join(format!("container-features-{}", timestamp));
        let empty_context_dir = temp_base.join("empty-folder");

        self.fs
            .create_dir(&features_content_dir)
            .await
            .map_err(|e| {
                log::error!("Failed to create features content dir: {e}");
                DevContainerError::FilesystemError
            })?;

        self.fs.create_dir(&empty_context_dir).await.map_err(|e| {
            log::error!("Failed to create empty context dir: {e}");
            DevContainerError::FilesystemError
        })?;

        // The image tag is derived from the extended Dockerfile's path so it
        // is stable for a given build location.
        let dockerfile_path = features_content_dir.join("Dockerfile.extended");
        let image_tag =
            self.generate_features_image_tag(dockerfile_path.clone().display().to_string());

        let build_info = FeaturesBuildInfo {
            dockerfile_path,
            features_content_dir,
            empty_context_dir,
            build_image: dev_container.image.clone(),
            image_tag,
        };

        let features = match &dev_container.features {
            Some(features) => features,
            None => &HashMap::new(),
        };

        // Feature install scripts receive the container and remote users via
        // a builtin env file written into the build context.
        let container_user = get_container_user_from_config(&root_image, self)?;
        let remote_user = get_remote_user_from_config(&root_image, self)?;

        let builtin_env_content = format!(
            "_CONTAINER_USER={}\n_REMOTE_USER={}\n",
            container_user, remote_user
        );

        let builtin_env_path = build_info
            .features_content_dir
            .join("devcontainer-features.builtin.env");

        self.fs
            .write(&builtin_env_path, &builtin_env_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write builtin env file: {e}");
                DevContainerError::FilesystemError
            })?;

        // Honor any explicit install-order override from the config.
        let ordered_features =
            resolve_feature_order(features, &dev_container.override_feature_install_order);

        for (index, (feature_ref, options)) in ordered_features.iter().enumerate() {
            // A feature whose options are literally `false` is disabled.
            if matches!(options, FeatureOptions::Bool(false)) {
                log::debug!(
                    "Feature '{}' is disabled (set to false), skipping",
                    feature_ref
                );
                continue;
            }

            // Each feature gets its own directory, suffixed with its install
            // index so duplicate feature ids cannot collide.
            let feature_id = extract_feature_id(feature_ref);
            let consecutive_id = format!("{}_{}", feature_id, index);
            let feature_dir = build_info.features_content_dir.join(&consecutive_id);

            self.fs.create_dir(&feature_dir).await.map_err(|e| {
                log::error!(
                    "Failed to create feature directory for {}: {e}",
                    feature_ref
                );
                DevContainerError::FilesystemError
            })?;

            // Features are fetched as OCI artifacts: token -> manifest ->
            // first layer tarball, downloaded into `feature_dir`.
            let oci_ref = parse_oci_feature_ref(feature_ref).ok_or_else(|| {
                log::error!(
                    "Feature '{}' is not a supported OCI feature reference",
                    feature_ref
                );
                DevContainerError::DevContainerParseFailed
            })?;
            let TokenResponse { token } =
                get_oci_token(&oci_ref.registry, &oci_ref.path, &self.http_client)
                    .await
                    .map_err(|e| {
                        log::error!("Failed to get OCI token for feature '{}': {e}", feature_ref);
                        DevContainerError::ResourceFetchFailed
                    })?;
            let manifest = get_oci_manifest(
                &oci_ref.registry,
                &oci_ref.path,
                &token,
                &self.http_client,
                &oci_ref.version,
                None,
            )
            .await
            .map_err(|e| {
                log::error!(
                    "Failed to fetch OCI manifest for feature '{}': {e}",
                    feature_ref
                );
                DevContainerError::ResourceFetchFailed
            })?;
            // Only the first layer is downloaded; devcontainer features ship
            // their content as a single tar layer.
            let digest = &manifest
                .layers
                .first()
                .ok_or_else(|| {
                    log::error!(
                        "OCI manifest for feature '{}' contains no layers",
                        feature_ref
                    );
                    DevContainerError::ResourceFetchFailed
                })?
                .digest;
            download_oci_tarball(
                &token,
                &oci_ref.registry,
                &oci_ref.path,
                digest,
                "application/vnd.devcontainers.layer.v1+tar",
                &feature_dir,
                &self.http_client,
                &self.fs,
                None,
            )
            .await?;

            // The downloaded layer must contain the feature's metadata file.
            let feature_json_path = &feature_dir.join("devcontainer-feature.json");
            if !self.fs.is_file(feature_json_path).await {
                let message = format!(
                    "No devcontainer-feature.json found in {:?}, no defaults to apply",
                    feature_json_path
                );
                log::error!("{}", &message);
                return Err(DevContainerError::ResourceFetchFailed);
            }

            let contents = self.fs.load(&feature_json_path).await.map_err(|e| {
                log::error!("error reading devcontainer-feature.json: {:?}", e);
                DevContainerError::FilesystemError
            })?;

            // Feature metadata may itself contain substitution variables.
            let contents_parsed = self.parse_nonremote_vars_for_content(&contents)?;

            let feature_json: DevContainerFeatureJson =
                serde_json_lenient::from_str(&contents_parsed).map_err(|e| {
                    log::error!("Failed to parse devcontainer-feature.json: {e}");
                    DevContainerError::ResourceFetchFailed
                })?;

            let feature_manifest = FeatureManifest::new(consecutive_id, feature_dir, feature_json);

            log::debug!("Downloaded OCI feature content for '{}'", feature_ref);

            // Write the feature's option env file plus a wrapper script that
            // applies it before running the feature's installer.
            let env_content = feature_manifest
                .write_feature_env(&self.fs, options)
                .await?;

            let wrapper_content = generate_install_wrapper(feature_ref, feature_id, &env_content)?;

            self.fs
                .write(
                    &feature_manifest
                        .file_path()
                        .join("devcontainer-features-install.sh"),
                    &wrapper_content.as_bytes(),
                )
                .await
                .map_err(|e| {
                    log::error!("Failed to write install wrapper for {}: {e}", feature_ref);
                    DevContainerError::FilesystemError
                })?;

            self.features.push(feature_manifest);
        }

        // --- Phase 3: Generate extended Dockerfile from the inflated manifests ---

        // Compose builds only get the BuildKit Dockerfile shape when the
        // docker client reports compose BuildKit support.
        let is_compose = dev_container.build_type() == DevContainerBuildType::DockerCompose;
        let use_buildkit = self.docker_client.supports_compose_buildkit() || !is_compose;

        // The user's base Dockerfile (if any) gets embedded into the
        // extended Dockerfile; read failures are logged and treated as none.
        let dockerfile_base_content = if let Some(location) = &self.dockerfile_location().await {
            self.fs.load(location).await.log_err()
        } else {
            None
        };

        let dockerfile_content = self.generate_dockerfile_extended(
            &container_user,
            &remote_user,
            dockerfile_base_content,
            use_buildkit,
        );

        self.fs
            .write(&build_info.dockerfile_path, &dockerfile_content.as_bytes())
            .await
            .map_err(|e| {
                log::error!("Failed to write Dockerfile.extended: {e}");
                DevContainerError::FilesystemError
            })?;

        log::debug!(
            "Features build resources written to {:?}",
            build_info.features_content_dir
        );

        self.root_image = Some(root_image);
        self.features_build_info = Some(build_info);

        Ok(())
    }
548
    /// Renders the "extended" Dockerfile: the user's base Dockerfile (if
    /// any), followed by stages that copy the feature content into the image
    /// and run each feature's install layer.
    ///
    /// `use_buildkit` switches between BuildKit additional build contexts and
    /// a classic intermediate-image source stage for the feature content.
    fn generate_dockerfile_extended(
        &self,
        container_user: &str,
        remote_user: &str,
        dockerfile_content: Option<String>,
        use_buildkit: bool,
    ) -> String {
        // Updating a user's uid is unix-only; never attempt it on Windows.
        #[cfg(not(target_os = "windows"))]
        let update_remote_user_uid = self.dev_container().update_remote_user_uid.unwrap_or(true);
        #[cfg(target_os = "windows")]
        let update_remote_user_uid = false;
        // One Dockerfile fragment per downloaded feature, in install order.
        let feature_layers: String = self
            .features
            .iter()
            .map(|manifest| {
                manifest.generate_dockerfile_feature_layer(
                    use_buildkit,
                    FEATURES_CONTAINER_TEMP_DEST_FOLDER,
                )
            })
            .collect();

        // Shell snippets that resolve each user's passwd entry; used below
        // to extract home directories into the builtin env file.
        let container_home_cmd = get_ent_passwd_shell_command(container_user);
        let remote_home_cmd = get_ent_passwd_shell_command(remote_user);

        // Ensure the user's Dockerfile has a stage alias we can target as
        // the base image; inject one if it doesn't define its own.
        let dockerfile_content = dockerfile_content
            .map(|content| {
                if dockerfile_alias(&content).is_some() {
                    content
                } else {
                    dockerfile_inject_alias(&content, "dev_container_auto_added_stage_label")
                }
            })
            .unwrap_or("".to_string());

        let dest = FEATURES_CONTAINER_TEMP_DEST_FOLDER;

        // Classic (non-BuildKit) builds stage the feature content through an
        // intermediate image; BuildKit builds use an additional build
        // context named `dev_containers_feature_content_source` instead.
        let feature_content_source_stage = if use_buildkit {
            "".to_string()
        } else {
            "\nFROM dev_container_feature_content_temp as dev_containers_feature_content_source\n"
                .to_string()
        };

        let builtin_env_source_path = if use_buildkit {
            "./devcontainer-features.builtin.env"
        } else {
            "/tmp/build-features/devcontainer-features.builtin.env"
        };

        let mut extended_dockerfile = format!(
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

{dockerfile_content}
{feature_content_source_stage}
FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source {builtin_env_source_path} /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p {dest}
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ {dest}

RUN \
echo "_CONTAINER_USER_HOME=$({container_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$({remote_home_cmd} | cut -d: -f6)" >> {dest}/devcontainer-features.builtin.env

{feature_layers}

ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // If we're not adding a uid update layer, then we should add env vars to this layer instead
        if !update_remote_user_uid {
            extended_dockerfile = format!(
                r#"{extended_dockerfile}
# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${{PATH:-\3}}/g' /etc/profile || true
"#
            );

            for feature in &self.features {
                let container_env_layer = feature.generate_dockerfile_env();
                extended_dockerfile = format!("{extended_dockerfile}\n{container_env_layer}");
            }

            // NOTE(review): no separator is inserted before `ENV` here; if
            // the preceding content does not end with a newline the first
            // ENV line is glued onto it — confirm against the output of
            // `generate_dockerfile_env`.
            if let Some(env) = &self.dev_container().container_env {
                for (key, value) in env {
                    extended_dockerfile = format!("{extended_dockerfile}ENV {key}={value}\n");
                }
            }
        }

        extended_dockerfile
    }
650
651 fn build_merged_resources(
652 &self,
653 base_image: DockerInspect,
654 ) -> Result<DockerBuildResources, DevContainerError> {
655 let dev_container = match &self.config {
656 ConfigStatus::Deserialized(_) => {
657 log::error!(
658 "Dev container has not yet been parsed for variable expansion. Cannot yet merge resources"
659 );
660 return Err(DevContainerError::DevContainerParseFailed);
661 }
662 ConfigStatus::VariableParsed(dev_container) => dev_container,
663 };
664 let mut mounts = dev_container.mounts.clone().unwrap_or(Vec::new());
665
666 let mut feature_mounts = self.features.iter().flat_map(|f| f.mounts()).collect();
667
668 mounts.append(&mut feature_mounts);
669
670 let privileged = dev_container.privileged.unwrap_or(false)
671 || self.features.iter().any(|f| f.privileged());
672
673 let mut entrypoint_script_lines = vec![
674 "echo Container started".to_string(),
675 "trap \"exit 0\" 15".to_string(),
676 ];
677
678 for entrypoint in self.features.iter().filter_map(|f| f.entrypoint()) {
679 entrypoint_script_lines.push(entrypoint.clone());
680 }
681 entrypoint_script_lines.append(&mut vec![
682 "exec \"$@\"".to_string(),
683 "while sleep 1 & wait $!; do :; done".to_string(),
684 ]);
685
686 Ok(DockerBuildResources {
687 image: base_image,
688 additional_mounts: mounts,
689 privileged,
690 entrypoint_script: entrypoint_script_lines.join("\n").trim().to_string(),
691 })
692 }
693
694 async fn build_resources(&self) -> Result<DevContainerBuildResources, DevContainerError> {
695 if let ConfigStatus::Deserialized(_) = &self.config {
696 log::error!(
697 "Dev container has not yet been parsed for variable expansion. Cannot yet build resources"
698 );
699 return Err(DevContainerError::DevContainerParseFailed);
700 }
701 let dev_container = self.dev_container();
702 match dev_container.build_type() {
703 DevContainerBuildType::Image | DevContainerBuildType::Dockerfile => {
704 let built_docker_image = self.build_docker_image().await?;
705 let built_docker_image = self
706 .update_remote_user_uid(built_docker_image, None)
707 .await?;
708
709 let resources = self.build_merged_resources(built_docker_image)?;
710 Ok(DevContainerBuildResources::Docker(resources))
711 }
712 DevContainerBuildType::DockerCompose => {
713 log::debug!("Using docker compose. Building extended compose files");
714 let docker_compose_resources = self.build_and_extend_compose_files().await?;
715
716 return Ok(DevContainerBuildResources::DockerCompose(
717 docker_compose_resources,
718 ));
719 }
720 DevContainerBuildType::None => {
721 return Err(DevContainerError::DevContainerParseFailed);
722 }
723 }
724 }
725
726 async fn run_dev_container(
727 &self,
728 build_resources: DevContainerBuildResources,
729 ) -> Result<DevContainerUp, DevContainerError> {
730 let ConfigStatus::VariableParsed(_) = &self.config else {
731 log::error!(
732 "Variables have not been parsed; cannot proceed with running the dev container"
733 );
734 return Err(DevContainerError::DevContainerParseFailed);
735 };
736 let running_container = match build_resources {
737 DevContainerBuildResources::DockerCompose(resources) => {
738 self.run_docker_compose(resources).await?
739 }
740 DevContainerBuildResources::Docker(resources) => {
741 self.run_docker_image(resources).await?
742 }
743 };
744
745 let remote_user = get_remote_user_from_config(&running_container, self)?;
746 let remote_workspace_folder = get_remote_dir_from_config(
747 &running_container,
748 (&self.local_project_directory.display()).to_string(),
749 )?;
750
751 let remote_env = self.runtime_remote_env(&running_container.config.env_as_map()?)?;
752
753 Ok(DevContainerUp {
754 container_id: running_container.id,
755 remote_user,
756 remote_workspace_folder,
757 extension_ids: self.extension_ids(),
758 remote_env,
759 })
760 }
761
762 async fn docker_compose_manifest(&self) -> Result<DockerComposeResources, DevContainerError> {
763 let dev_container = match &self.config {
764 ConfigStatus::Deserialized(_) => {
765 log::error!(
766 "Dev container has not yet been parsed for variable expansion. Cannot yet get docker compose files"
767 );
768 return Err(DevContainerError::DevContainerParseFailed);
769 }
770 ConfigStatus::VariableParsed(dev_container) => dev_container,
771 };
772 let Some(docker_compose_files) = dev_container.docker_compose_file.clone() else {
773 return Err(DevContainerError::DevContainerParseFailed);
774 };
775 let docker_compose_full_paths = docker_compose_files
776 .iter()
777 .map(|relative| self.config_directory.join(relative))
778 .collect::<Vec<PathBuf>>();
779
780 let Some(config) = self
781 .docker_client
782 .get_docker_compose_config(&docker_compose_full_paths)
783 .await?
784 else {
785 log::error!("Output could not deserialize into DockerComposeConfig");
786 return Err(DevContainerError::DevContainerParseFailed);
787 };
788 Ok(DockerComposeResources {
789 files: docker_compose_full_paths,
790 config,
791 })
792 }
793
794 async fn build_and_extend_compose_files(
795 &self,
796 ) -> Result<DockerComposeResources, DevContainerError> {
797 let dev_container = match &self.config {
798 ConfigStatus::Deserialized(_) => {
799 log::error!(
800 "Dev container has not yet been parsed for variable expansion. Cannot yet build from compose files"
801 );
802 return Err(DevContainerError::DevContainerParseFailed);
803 }
804 ConfigStatus::VariableParsed(dev_container) => dev_container,
805 };
806
807 let Some(features_build_info) = &self.features_build_info else {
808 log::error!(
809 "Cannot build and extend compose files: features build info is not yet constructed"
810 );
811 return Err(DevContainerError::DevContainerParseFailed);
812 };
813 let mut docker_compose_resources = self.docker_compose_manifest().await?;
814 let supports_buildkit = self.docker_client.supports_compose_buildkit();
815
816 let (main_service_name, main_service) =
817 find_primary_service(&docker_compose_resources, self)?;
818 let built_service_image = if main_service
819 .build
820 .as_ref()
821 .map(|b| b.dockerfile.as_ref())
822 .is_some()
823 {
824 if !supports_buildkit {
825 self.build_feature_content_image().await?;
826 }
827
828 let dockerfile_path = &features_build_info.dockerfile_path;
829
830 let build_args = if !supports_buildkit {
831 HashMap::from([
832 (
833 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
834 "dev_container_auto_added_stage_label".to_string(),
835 ),
836 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
837 ])
838 } else {
839 HashMap::from([
840 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
841 (
842 "_DEV_CONTAINERS_BASE_IMAGE".to_string(),
843 "dev_container_auto_added_stage_label".to_string(),
844 ),
845 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
846 ])
847 };
848
849 let additional_contexts = if !supports_buildkit {
850 None
851 } else {
852 Some(HashMap::from([(
853 "dev_containers_feature_content_source".to_string(),
854 features_build_info
855 .features_content_dir
856 .display()
857 .to_string(),
858 )]))
859 };
860
861 let build_override = DockerComposeConfig {
862 name: None,
863 services: HashMap::from([(
864 main_service_name.clone(),
865 DockerComposeService {
866 image: Some(features_build_info.image_tag.clone()),
867 entrypoint: None,
868 cap_add: None,
869 security_opt: None,
870 labels: None,
871 build: Some(DockerComposeServiceBuild {
872 context: Some(
873 features_build_info.empty_context_dir.display().to_string(),
874 ),
875 dockerfile: Some(dockerfile_path.display().to_string()),
876 args: Some(build_args),
877 additional_contexts,
878 }),
879 volumes: Vec::new(),
880 ..Default::default()
881 },
882 )]),
883 volumes: HashMap::new(),
884 };
885
886 let temp_base = std::env::temp_dir().join("devcontainer-zed");
887 let config_location = temp_base.join("docker_compose_build.json");
888
889 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
890 log::error!("Error serializing docker compose runtime override: {e}");
891 DevContainerError::DevContainerParseFailed
892 })?;
893
894 self.fs
895 .write(&config_location, config_json.as_bytes())
896 .await
897 .map_err(|e| {
898 log::error!("Error writing the runtime override file: {e}");
899 DevContainerError::FilesystemError
900 })?;
901
902 docker_compose_resources.files.push(config_location);
903
904 self.docker_client
905 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
906 .await?;
907 self.docker_client
908 .inspect(&features_build_info.image_tag)
909 .await?
910 } else if let Some(image) = &main_service.image {
911 if dev_container
912 .features
913 .as_ref()
914 .is_none_or(|features| features.is_empty())
915 {
916 self.docker_client.inspect(image).await?
917 } else {
918 if !supports_buildkit {
919 self.build_feature_content_image().await?;
920 }
921
922 let dockerfile_path = &features_build_info.dockerfile_path;
923
924 let build_args = if !supports_buildkit {
925 HashMap::from([
926 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
927 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
928 ])
929 } else {
930 HashMap::from([
931 ("BUILDKIT_INLINE_CACHE".to_string(), "1".to_string()),
932 ("_DEV_CONTAINERS_BASE_IMAGE".to_string(), image.clone()),
933 ("_DEV_CONTAINERS_IMAGE_USER".to_string(), "root".to_string()),
934 ])
935 };
936
937 let additional_contexts = if !supports_buildkit {
938 None
939 } else {
940 Some(HashMap::from([(
941 "dev_containers_feature_content_source".to_string(),
942 features_build_info
943 .features_content_dir
944 .display()
945 .to_string(),
946 )]))
947 };
948
949 let build_override = DockerComposeConfig {
950 name: None,
951 services: HashMap::from([(
952 main_service_name.clone(),
953 DockerComposeService {
954 image: Some(features_build_info.image_tag.clone()),
955 entrypoint: None,
956 cap_add: None,
957 security_opt: None,
958 labels: None,
959 build: Some(DockerComposeServiceBuild {
960 context: Some(
961 features_build_info.empty_context_dir.display().to_string(),
962 ),
963 dockerfile: Some(dockerfile_path.display().to_string()),
964 args: Some(build_args),
965 additional_contexts,
966 }),
967 volumes: Vec::new(),
968 ..Default::default()
969 },
970 )]),
971 volumes: HashMap::new(),
972 };
973
974 let temp_base = std::env::temp_dir().join("devcontainer-zed");
975 let config_location = temp_base.join("docker_compose_build.json");
976
977 let config_json = serde_json_lenient::to_string(&build_override).map_err(|e| {
978 log::error!("Error serializing docker compose runtime override: {e}");
979 DevContainerError::DevContainerParseFailed
980 })?;
981
982 self.fs
983 .write(&config_location, config_json.as_bytes())
984 .await
985 .map_err(|e| {
986 log::error!("Error writing the runtime override file: {e}");
987 DevContainerError::FilesystemError
988 })?;
989
990 docker_compose_resources.files.push(config_location);
991
992 self.docker_client
993 .docker_compose_build(&docker_compose_resources.files, &self.project_name())
994 .await?;
995
996 self.docker_client
997 .inspect(&features_build_info.image_tag)
998 .await?
999 }
1000 } else {
1001 log::error!("Docker compose must have either image or dockerfile defined");
1002 return Err(DevContainerError::DevContainerParseFailed);
1003 };
1004
1005 let built_service_image = self
1006 .update_remote_user_uid(built_service_image, Some(&features_build_info.image_tag))
1007 .await?;
1008
1009 let resources = self.build_merged_resources(built_service_image)?;
1010
1011 let network_mode = main_service.network_mode.as_ref();
1012 let network_mode_service = network_mode.and_then(|mode| mode.strip_prefix("service:"));
1013 let runtime_override_file = self
1014 .write_runtime_override_file(&main_service_name, network_mode_service, resources)
1015 .await?;
1016
1017 docker_compose_resources.files.push(runtime_override_file);
1018
1019 Ok(docker_compose_resources)
1020 }
1021
1022 async fn write_runtime_override_file(
1023 &self,
1024 main_service_name: &str,
1025 network_mode_service: Option<&str>,
1026 resources: DockerBuildResources,
1027 ) -> Result<PathBuf, DevContainerError> {
1028 let config =
1029 self.build_runtime_override(main_service_name, network_mode_service, resources)?;
1030 let temp_base = std::env::temp_dir().join("devcontainer-zed");
1031 let config_location = temp_base.join("docker_compose_runtime.json");
1032
1033 let config_json = serde_json_lenient::to_string(&config).map_err(|e| {
1034 log::error!("Error serializing docker compose runtime override: {e}");
1035 DevContainerError::DevContainerParseFailed
1036 })?;
1037
1038 self.fs
1039 .write(&config_location, config_json.as_bytes())
1040 .await
1041 .map_err(|e| {
1042 log::error!("Error writing the runtime override file: {e}");
1043 DevContainerError::FilesystemError
1044 })?;
1045
1046 Ok(config_location)
1047 }
1048
    /// Builds the docker-compose override applied at `up` time: entrypoint,
    /// identifying labels, volumes/mounts, and port forwards for the main
    /// service plus any sibling services named in `forwardPorts`.
    ///
    /// `network_mode_service` is the service whose network the main service
    /// joins (from `network_mode: "service:<name>"`); when set, the main
    /// service's ports must be published on that service instead, since the
    /// main service has no network bridge of its own.
    fn build_runtime_override(
        &self,
        main_service_name: &str,
        network_mode_service: Option<&str>,
        resources: DockerBuildResources,
    ) -> Result<DockerComposeConfig, DevContainerError> {
        let mut runtime_labels = vec![];

        // Persist the merged devcontainer metadata on the container as a label
        // so later inspections can recover the effective config.
        if let Some(metadata) = &resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Error serializing docker image metadata: {e}");
                DevContainerError::ContainerNotValid(resources.image.id.clone())
            })?;

            runtime_labels.push(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Labels used to find this container again on subsequent `up`s.
        for (k, v) in self.identifying_labels() {
            runtime_labels.push(format!("{}={}", k, v));
        }

        // Every `type=volume` mount needs a matching top-level volume
        // declaration in the compose file.
        let config_volumes: HashMap<String, DockerComposeVolume> = resources
            .additional_mounts
            .iter()
            .filter_map(|mount| {
                if let Some(mount_type) = &mount.mount_type
                    && mount_type.to_lowercase() == "volume"
                {
                    Some((
                        mount.source.clone(),
                        DockerComposeVolume {
                            name: mount.source.clone(),
                        },
                    ))
                } else {
                    None
                }
            })
            .collect();

        let volumes: Vec<MountDefinition> = resources
            .additional_mounts
            .iter()
            .map(|v| MountDefinition {
                source: v.source.clone(),
                target: v.target.clone(),
                mount_type: v.mount_type.clone(),
            })
            .collect();

        // The bootstrap script is fed to `/bin/sh -c` as the entrypoint.
        let mut main_service = DockerComposeService {
            entrypoint: Some(vec![
                "/bin/sh".to_string(),
                "-c".to_string(),
                resources.entrypoint_script,
                "-".to_string(),
            ]),
            cap_add: Some(vec!["SYS_PTRACE".to_string()]),
            security_opt: Some(vec!["seccomp=unconfined".to_string()]),
            labels: Some(runtime_labels),
            volumes,
            privileged: Some(resources.privileged),
            ..Default::default()
        };
        let mut service_declarations: HashMap<String, DockerComposeService> = HashMap::new();
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            // Ports belonging to the main service: bare numbers, bare "port"
            // strings, and "<main_service_name>:port" entries.
            let main_service_ports: Vec<String> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(port) => Some(port.to_string()),
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() <= 1 {
                            Some(port.to_string())
                        } else if parts.len() == 2 {
                            if parts[0] == main_service_name {
                                Some(parts[1].to_string())
                            } else {
                                None
                            }
                        } else {
                            None
                        }
                    }
                })
                .collect();
            for port in main_service_ports {
                // If the main service uses a different service's network bridge, append to that service's ports instead
                if let Some(network_service_name) = network_mode_service {
                    if let Some(service) = service_declarations.get_mut(network_service_name) {
                        service.ports.push(format!("{port}:{port}"));
                    } else {
                        service_declarations.insert(
                            network_service_name.to_string(),
                            DockerComposeService {
                                ports: vec![format!("{port}:{port}")],
                                ..Default::default()
                            },
                        );
                    }
                } else {
                    main_service.ports.push(format!("{port}:{port}"));
                }
            }
            // "<service>:port" entries naming services other than the main one
            // get published on those services directly.
            let other_service_ports: Vec<(&str, &str)> = forward_ports
                .iter()
                .filter_map(|f| match f {
                    ForwardPort::Number(_) => None,
                    ForwardPort::String(port) => {
                        let parts: Vec<&str> = port.split(":").collect();
                        if parts.len() != 2 {
                            None
                        } else {
                            if parts[0] == main_service_name {
                                None
                            } else {
                                Some((parts[0], parts[1]))
                            }
                        }
                    }
                })
                .collect();
            for (service_name, port) in other_service_ports {
                if let Some(service) = service_declarations.get_mut(service_name) {
                    service.ports.push(format!("{port}:{port}"));
                } else {
                    service_declarations.insert(
                        service_name.to_string(),
                        DockerComposeService {
                            ports: vec![format!("{port}:{port}")],
                            ..Default::default()
                        },
                    );
                }
            }
        }
        // `appPort` is routed like a main-service forward, respecting a
        // service-scoped network mode.
        if let Some(port) = &self.dev_container().app_port {
            if let Some(network_service_name) = network_mode_service {
                if let Some(service) = service_declarations.get_mut(network_service_name) {
                    service.ports.push(format!("{port}:{port}"));
                } else {
                    service_declarations.insert(
                        network_service_name.to_string(),
                        DockerComposeService {
                            ports: vec![format!("{port}:{port}")],
                            ..Default::default()
                        },
                    );
                }
            } else {
                main_service.ports.push(format!("{port}:{port}"));
            }
        }

        service_declarations.insert(main_service_name.to_string(), main_service);
        let new_docker_compose_config = DockerComposeConfig {
            name: None,
            services: service_declarations,
            volumes: config_volumes,
        };

        Ok(new_docker_compose_config)
    }
1216
1217 async fn build_docker_image(&self) -> Result<DockerInspect, DevContainerError> {
1218 let dev_container = match &self.config {
1219 ConfigStatus::Deserialized(_) => {
1220 log::error!(
1221 "Dev container has not yet been parsed for variable expansion. Cannot yet build image"
1222 );
1223 return Err(DevContainerError::DevContainerParseFailed);
1224 }
1225 ConfigStatus::VariableParsed(dev_container) => dev_container,
1226 };
1227
1228 match dev_container.build_type() {
1229 DevContainerBuildType::Image => {
1230 let Some(image_tag) = &dev_container.image else {
1231 return Err(DevContainerError::DevContainerParseFailed);
1232 };
1233 let base_image = self.docker_client.inspect(image_tag).await?;
1234 if dev_container
1235 .features
1236 .as_ref()
1237 .is_none_or(|features| features.is_empty())
1238 {
1239 log::debug!("No features to add. Using base image");
1240 return Ok(base_image);
1241 }
1242 }
1243 DevContainerBuildType::Dockerfile => {}
1244 DevContainerBuildType::DockerCompose | DevContainerBuildType::None => {
1245 return Err(DevContainerError::DevContainerParseFailed);
1246 }
1247 };
1248
1249 let mut command = self.create_docker_build()?;
1250
1251 let output = self
1252 .command_runner
1253 .run_command(&mut command)
1254 .await
1255 .map_err(|e| {
1256 log::error!("Error building docker image: {e}");
1257 DevContainerError::CommandFailed(command.get_program().display().to_string())
1258 })?;
1259
1260 if !output.status.success() {
1261 let stderr = String::from_utf8_lossy(&output.stderr);
1262 log::error!("docker buildx build failed: {stderr}");
1263 return Err(DevContainerError::CommandFailed(
1264 command.get_program().display().to_string(),
1265 ));
1266 }
1267
1268 // After a successful build, inspect the newly tagged image to get its metadata
1269 let Some(features_build_info) = &self.features_build_info else {
1270 log::error!("Features build info expected, but not created");
1271 return Err(DevContainerError::DevContainerParseFailed);
1272 };
1273 let image = self
1274 .docker_client
1275 .inspect(&features_build_info.image_tag)
1276 .await?;
1277
1278 Ok(image)
1279 }
1280
    /// No-op on Windows: UID/GID remapping only makes sense when the host has
    /// POSIX user/group ids to mirror into the container.
    #[cfg(target_os = "windows")]
    async fn update_remote_user_uid(
        &self,
        image: DockerInspect,
        _override_tag: Option<&str>,
    ) -> Result<DockerInspect, DevContainerError> {
        Ok(image)
    }
1289 #[cfg(not(target_os = "windows"))]
1290 async fn update_remote_user_uid(
1291 &self,
1292 image: DockerInspect,
1293 override_tag: Option<&str>,
1294 ) -> Result<DockerInspect, DevContainerError> {
1295 let dev_container = self.dev_container();
1296
1297 let Some(features_build_info) = &self.features_build_info else {
1298 return Ok(image);
1299 };
1300
1301 // updateRemoteUserUID defaults to true per the devcontainers spec
1302 if dev_container.update_remote_user_uid == Some(false) {
1303 return Ok(image);
1304 }
1305
1306 let remote_user = get_remote_user_from_config(&image, self)?;
1307 if remote_user == "root" || remote_user.chars().all(|c| c.is_ascii_digit()) {
1308 return Ok(image);
1309 }
1310
1311 let image_user = image
1312 .config
1313 .image_user
1314 .as_deref()
1315 .unwrap_or("root")
1316 .to_string();
1317
1318 let host_uid = Command::new("id")
1319 .arg("-u")
1320 .output()
1321 .await
1322 .map_err(|e| {
1323 log::error!("Failed to get host UID: {e}");
1324 DevContainerError::CommandFailed("id -u".to_string())
1325 })
1326 .and_then(|output| {
1327 String::from_utf8_lossy(&output.stdout)
1328 .trim()
1329 .parse::<u32>()
1330 .map_err(|e| {
1331 log::error!("Failed to parse host UID: {e}");
1332 DevContainerError::CommandFailed("id -u".to_string())
1333 })
1334 })?;
1335
1336 let host_gid = Command::new("id")
1337 .arg("-g")
1338 .output()
1339 .await
1340 .map_err(|e| {
1341 log::error!("Failed to get host GID: {e}");
1342 DevContainerError::CommandFailed("id -g".to_string())
1343 })
1344 .and_then(|output| {
1345 String::from_utf8_lossy(&output.stdout)
1346 .trim()
1347 .parse::<u32>()
1348 .map_err(|e| {
1349 log::error!("Failed to parse host GID: {e}");
1350 DevContainerError::CommandFailed("id -g".to_string())
1351 })
1352 })?;
1353
1354 let dockerfile_content = self.generate_update_uid_dockerfile();
1355
1356 let dockerfile_path = features_build_info
1357 .features_content_dir
1358 .join("updateUID.Dockerfile");
1359 self.fs
1360 .write(&dockerfile_path, dockerfile_content.as_bytes())
1361 .await
1362 .map_err(|e| {
1363 log::error!("Failed to write updateUID Dockerfile: {e}");
1364 DevContainerError::FilesystemError
1365 })?;
1366
1367 let updated_image_tag = override_tag
1368 .map(|t| t.to_string())
1369 .unwrap_or_else(|| format!("{}-uid", features_build_info.image_tag));
1370
1371 let mut command = Command::new(self.docker_client.docker_cli());
1372 command.args(["build"]);
1373 command.args(["-f", &dockerfile_path.display().to_string()]);
1374 command.args(["-t", &updated_image_tag]);
1375 command.args([
1376 "--build-arg",
1377 &format!("BASE_IMAGE={}", features_build_info.image_tag),
1378 ]);
1379 command.args(["--build-arg", &format!("REMOTE_USER={}", remote_user)]);
1380 command.args(["--build-arg", &format!("NEW_UID={}", host_uid)]);
1381 command.args(["--build-arg", &format!("NEW_GID={}", host_gid)]);
1382 command.args(["--build-arg", &format!("IMAGE_USER={}", image_user)]);
1383 command.arg(features_build_info.empty_context_dir.display().to_string());
1384
1385 let output = self
1386 .command_runner
1387 .run_command(&mut command)
1388 .await
1389 .map_err(|e| {
1390 log::error!("Error building UID update image: {e}");
1391 DevContainerError::CommandFailed(command.get_program().display().to_string())
1392 })?;
1393
1394 if !output.status.success() {
1395 let stderr = String::from_utf8_lossy(&output.stderr);
1396 log::error!("UID update build failed: {stderr}");
1397 return Err(DevContainerError::CommandFailed(
1398 command.get_program().display().to_string(),
1399 ));
1400 }
1401
1402 self.docker_client.inspect(&updated_image_tag).await
1403 }
1404
1405 #[cfg(not(target_os = "windows"))]
1406 fn generate_update_uid_dockerfile(&self) -> String {
1407 let mut dockerfile = r#"ARG BASE_IMAGE
1408FROM $BASE_IMAGE
1409
1410USER root
1411
1412ARG REMOTE_USER
1413ARG NEW_UID
1414ARG NEW_GID
1415SHELL ["/bin/sh", "-c"]
1416RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
1417 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
1418 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
1419 if [ -z "$OLD_UID" ]; then \
1420 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
1421 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
1422 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
1423 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
1424 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
1425 else \
1426 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
1427 FREE_GID=65532; \
1428 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
1429 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
1430 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
1431 fi; \
1432 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
1433 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
1434 if [ "$OLD_GID" != "$NEW_GID" ]; then \
1435 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
1436 fi; \
1437 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
1438 fi;
1439
1440ARG IMAGE_USER
1441USER $IMAGE_USER
1442
1443# Ensure that /etc/profile does not clobber the existing path
1444RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
1445"#.to_string();
1446 for feature in &self.features {
1447 let container_env_layer = feature.generate_dockerfile_env();
1448 dockerfile = format!("{dockerfile}\n{container_env_layer}");
1449 }
1450
1451 if let Some(env) = &self.dev_container().container_env {
1452 for (key, value) in env {
1453 dockerfile = format!("{dockerfile}ENV {key}={value}\n");
1454 }
1455 }
1456 dockerfile
1457 }
1458
1459 async fn build_feature_content_image(&self) -> Result<(), DevContainerError> {
1460 let Some(features_build_info) = &self.features_build_info else {
1461 log::error!("Features build info not available for building feature content image");
1462 return Err(DevContainerError::DevContainerParseFailed);
1463 };
1464 let features_content_dir = &features_build_info.features_content_dir;
1465
1466 let dockerfile_content = "FROM scratch\nCOPY . /tmp/build-features/\n";
1467 let dockerfile_path = features_content_dir.join("Dockerfile.feature-content");
1468
1469 self.fs
1470 .write(&dockerfile_path, dockerfile_content.as_bytes())
1471 .await
1472 .map_err(|e| {
1473 log::error!("Failed to write feature content Dockerfile: {e}");
1474 DevContainerError::FilesystemError
1475 })?;
1476
1477 let mut command = Command::new(self.docker_client.docker_cli());
1478 command.args([
1479 "build",
1480 "-t",
1481 "dev_container_feature_content_temp",
1482 "-f",
1483 &dockerfile_path.display().to_string(),
1484 &features_content_dir.display().to_string(),
1485 ]);
1486
1487 let output = self
1488 .command_runner
1489 .run_command(&mut command)
1490 .await
1491 .map_err(|e| {
1492 log::error!("Error building feature content image: {e}");
1493 DevContainerError::CommandFailed(self.docker_client.docker_cli())
1494 })?;
1495
1496 if !output.status.success() {
1497 let stderr = String::from_utf8_lossy(&output.stderr);
1498 log::error!("Feature content image build failed: {stderr}");
1499 return Err(DevContainerError::CommandFailed(
1500 self.docker_client.docker_cli(),
1501 ));
1502 }
1503
1504 Ok(())
1505 }
1506
    /// Assembles the `docker buildx build` command that layers the configured
    /// features on top of the base image (or the user's Dockerfile).
    ///
    /// Requires variable expansion to have run and `features_build_info` to
    /// be populated; errors otherwise.
    fn create_docker_build(&self) -> Result<Command, DevContainerError> {
        let dev_container = match &self.config {
            ConfigStatus::Deserialized(_) => {
                log::error!(
                    "Dev container has not yet been parsed for variable expansion. Cannot yet proceed with docker build"
                );
                return Err(DevContainerError::DevContainerParseFailed);
            }
            ConfigStatus::VariableParsed(dev_container) => dev_container,
        };

        let Some(features_build_info) = &self.features_build_info else {
            log::error!(
                "Cannot create docker build command; features build info has not been constructed"
            );
            return Err(DevContainerError::DevContainerParseFailed);
        };
        let mut command = Command::new(self.docker_client.docker_cli());

        command.args(["buildx", "build"]);

        // --load is short for --output=docker, loading the built image into the local docker images
        command.arg("--load");

        // BuildKit build context: provides the features content directory as a named context
        // that the Dockerfile.extended can COPY from via `--from=dev_containers_feature_content_source`
        command.args([
            "--build-context",
            &format!(
                "dev_containers_feature_content_source={}",
                features_build_info.features_content_dir.display()
            ),
        ]);

        // Build args matching the CLI reference implementation's `getFeaturesBuildOptions`
        if let Some(build_image) = &features_build_info.build_image {
            command.args([
                "--build-arg",
                &format!("_DEV_CONTAINERS_BASE_IMAGE={}", build_image),
            ]);
        } else {
            // No explicit base image: the generated Dockerfile labels its own
            // base stage with this well-known stage name instead.
            command.args([
                "--build-arg",
                "_DEV_CONTAINERS_BASE_IMAGE=dev_container_auto_added_stage_label",
            ]);
        }

        // The user the base image runs as; falls back to "root" when the
        // inspected image metadata doesn't specify one.
        command.args([
            "--build-arg",
            &format!(
                "_DEV_CONTAINERS_IMAGE_USER={}",
                self.root_image
                    .as_ref()
                    .and_then(|docker_image| docker_image.config.image_user.as_ref())
                    .unwrap_or(&"root".to_string())
            ),
        ]);

        command.args([
            "--build-arg",
            "_DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp",
        ]);

        // User-supplied build args from the devcontainer's `build.args` map.
        if let Some(args) = dev_container.build.as_ref().and_then(|b| b.args.as_ref()) {
            for (key, value) in args {
                command.args(["--build-arg", &format!("{}={}", key, value)]);
            }
        }

        command.args(["--target", "dev_containers_target_stage"]);

        command.args([
            "-f",
            &features_build_info.dockerfile_path.display().to_string(),
        ]);

        command.args(["-t", &features_build_info.image_tag]);

        if dev_container.build_type() == DevContainerBuildType::Dockerfile {
            command.arg(self.config_directory.display().to_string());
        } else {
            // Use an empty folder as the build context to avoid pulling in unneeded files.
            // The actual feature content is supplied via the BuildKit build context above.
            command.arg(features_build_info.empty_context_dir.display().to_string());
        }

        Ok(command)
    }
1595
1596 async fn run_docker_compose(
1597 &self,
1598 resources: DockerComposeResources,
1599 ) -> Result<DockerInspect, DevContainerError> {
1600 let mut command = Command::new(self.docker_client.docker_cli());
1601 command.args(&["compose", "--project-name", &self.project_name()]);
1602 for docker_compose_file in resources.files {
1603 command.args(&["-f", &docker_compose_file.display().to_string()]);
1604 }
1605 command.args(&["up", "-d"]);
1606
1607 let output = self
1608 .command_runner
1609 .run_command(&mut command)
1610 .await
1611 .map_err(|e| {
1612 log::error!("Error running docker compose up: {e}");
1613 DevContainerError::CommandFailed(command.get_program().display().to_string())
1614 })?;
1615
1616 if !output.status.success() {
1617 let stderr = String::from_utf8_lossy(&output.stderr);
1618 log::error!("Non-success status from docker compose up: {}", stderr);
1619 return Err(DevContainerError::CommandFailed(
1620 command.get_program().display().to_string(),
1621 ));
1622 }
1623
1624 if let Some(docker_ps) = self.check_for_existing_container().await? {
1625 log::debug!("Found newly created dev container");
1626 return self.docker_client.inspect(&docker_ps.id).await;
1627 }
1628
1629 log::error!("Could not find existing container after docker compose up");
1630
1631 Err(DevContainerError::DevContainerParseFailed)
1632 }
1633
1634 async fn run_docker_image(
1635 &self,
1636 build_resources: DockerBuildResources,
1637 ) -> Result<DockerInspect, DevContainerError> {
1638 let mut docker_run_command = self.create_docker_run_command(build_resources)?;
1639
1640 let output = self
1641 .command_runner
1642 .run_command(&mut docker_run_command)
1643 .await
1644 .map_err(|e| {
1645 log::error!("Error running docker run: {e}");
1646 DevContainerError::CommandFailed(
1647 docker_run_command.get_program().display().to_string(),
1648 )
1649 })?;
1650
1651 if !output.status.success() {
1652 let std_err = String::from_utf8_lossy(&output.stderr);
1653 log::error!("Non-success status from docker run. StdErr: {std_err}");
1654 return Err(DevContainerError::CommandFailed(
1655 docker_run_command.get_program().display().to_string(),
1656 ));
1657 }
1658
1659 log::debug!("Checking for container that was started");
1660 let Some(docker_ps) = self.check_for_existing_container().await? else {
1661 log::error!("Could not locate container just created");
1662 return Err(DevContainerError::DevContainerParseFailed);
1663 };
1664 self.docker_client.inspect(&docker_ps.id).await
1665 }
1666
    /// The local project directory rendered as a display string.
    fn local_workspace_folder(&self) -> String {
        self.local_project_directory.display().to_string()
    }
1670 fn local_workspace_base_name(&self) -> Result<String, DevContainerError> {
1671 self.local_project_directory
1672 .file_name()
1673 .map(|f| f.display().to_string())
1674 .ok_or(DevContainerError::DevContainerParseFailed)
1675 }
1676
1677 fn remote_workspace_folder(&self) -> Result<PathBuf, DevContainerError> {
1678 self.dev_container()
1679 .workspace_folder
1680 .as_ref()
1681 .map(|folder| PathBuf::from(folder))
1682 .or(Some(
1683 PathBuf::from(DEFAULT_REMOTE_PROJECT_DIR).join(self.local_workspace_base_name()?),
1684 ))
1685 .ok_or(DevContainerError::DevContainerParseFailed)
1686 }
1687 fn remote_workspace_base_name(&self) -> Result<String, DevContainerError> {
1688 self.remote_workspace_folder().and_then(|f| {
1689 f.file_name()
1690 .map(|file_name| file_name.display().to_string())
1691 .ok_or(DevContainerError::DevContainerParseFailed)
1692 })
1693 }
1694
1695 fn remote_workspace_mount(&self) -> Result<MountDefinition, DevContainerError> {
1696 if let Some(mount) = &self.dev_container().workspace_mount {
1697 return Ok(mount.clone());
1698 }
1699 let Some(project_directory_name) = self.local_project_directory.file_name() else {
1700 return Err(DevContainerError::DevContainerParseFailed);
1701 };
1702
1703 Ok(MountDefinition {
1704 source: self.local_workspace_folder(),
1705 target: format!("/workspaces/{}", project_directory_name.display()),
1706 mount_type: None,
1707 })
1708 }
1709
    /// Assembles the `docker run` command for a non-compose container:
    /// mounts, identifying labels, metadata label, port publishing, and an
    /// entrypoint that runs the bootstrap script under `/bin/sh -c`.
    fn create_docker_run_command(
        &self,
        build_resources: DockerBuildResources,
    ) -> Result<Command, DevContainerError> {
        let remote_workspace_mount = self.remote_workspace_mount()?;

        let docker_cli = self.docker_client.docker_cli();
        let mut command = Command::new(&docker_cli);

        command.arg("run");

        if build_resources.privileged {
            command.arg("--privileged");
        }

        // Podman-specific flags: disable SELinux labeling for mounts and keep
        // the host user's id mapping inside the user namespace.
        if &docker_cli == "podman" {
            command.args(&["--security-opt", "label=disable", "--userns=keep-id"]);
        }

        command.arg("--sig-proxy=false");
        command.arg("-d");
        command.arg("--mount");
        command.arg(remote_workspace_mount.to_string());

        for mount in &build_resources.additional_mounts {
            command.arg("--mount");
            command.arg(mount.to_string());
        }

        // Labels used to find this container again on subsequent runs.
        for (key, val) in self.identifying_labels() {
            command.arg("-l");
            command.arg(format!("{}={}", key, val));
        }

        // Persist the merged devcontainer metadata on the container as a label.
        if let Some(metadata) = &build_resources.image.config.labels.metadata {
            let serialized_metadata = serde_json_lenient::to_string(metadata).map_err(|e| {
                log::error!("Problem serializing image metadata: {e}");
                DevContainerError::ContainerNotValid(build_resources.image.id.clone())
            })?;
            command.arg("-l");
            command.arg(format!(
                "{}={}",
                "devcontainer.metadata", serialized_metadata
            ));
        }

        // Only plain numeric `forwardPorts` entries apply here;
        // "service:port" strings are compose-specific.
        if let Some(forward_ports) = &self.dev_container().forward_ports {
            for port in forward_ports {
                if let ForwardPort::Number(port_number) = port {
                    command.arg("-p");
                    command.arg(format!("{port_number}:{port_number}"));
                }
            }
        }
        if let Some(app_port) = &self.dev_container().app_port {
            command.arg("-p");
            command.arg(format!("{app_port}:{app_port}"));
        }

        // Everything after the image id is passed to the container, giving an
        // effective invocation of `/bin/sh -c <script> -`.
        command.arg("--entrypoint");
        command.arg("/bin/sh");
        command.arg(&build_resources.image.id);
        command.arg("-c");

        command.arg(build_resources.entrypoint_script);
        command.arg("-");

        Ok(command)
    }
1779
1780 fn extension_ids(&self) -> Vec<String> {
1781 self.dev_container()
1782 .customizations
1783 .as_ref()
1784 .map(|c| c.zed.extensions.clone())
1785 .unwrap_or_default()
1786 }
1787
    /// Full build-and-start pipeline for a fresh dev container: host-side
    /// initialize commands, feature/Dockerfile downloads, image or compose
    /// build, container start, then in-container lifecycle scripts.
    async fn build_and_run(&mut self) -> Result<DevContainerUp, DevContainerError> {
        self.run_initialize_commands().await?;

        self.download_feature_and_dockerfile_resources().await?;

        let build_resources = self.build_resources().await?;

        let devcontainer_up = self.run_dev_container(build_resources).await?;

        // `true`: the container was just created, so create-phase scripts run too.
        self.run_remote_scripts(&devcontainer_up, true).await?;

        Ok(devcontainer_up)
    }
1801
    /// Runs the devcontainer lifecycle scripts inside the container via
    /// `docker exec`.
    ///
    /// When `new_container` is true, runs `onCreateCommand` and
    /// `updateContentCommand` (as root), then `postCreateCommand` and
    /// `postStartCommand` (as the remote user). `postAttachCommand` runs on
    /// every call, whether or not the container is new.
    async fn run_remote_scripts(
        &self,
        devcontainer_up: &DevContainerUp,
        new_container: bool,
    ) -> Result<(), DevContainerError> {
        let ConfigStatus::VariableParsed(config) = &self.config else {
            log::error!("Config not yet parsed, cannot proceed with remote scripts");
            return Err(DevContainerError::DevContainerScriptsFailed);
        };
        let remote_folder = self.remote_workspace_folder()?.display().to_string();

        if new_container {
            // Create-phase scripts run as root.
            if let Some(on_create_command) = &config.on_create_command {
                for (command_name, command) in on_create_command.script_commands() {
                    log::debug!("Running on create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(update_content_command) = &config.update_content_command {
                for (command_name, command) in update_content_command.script_commands() {
                    log::debug!("Running update content command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            "root",
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }

            // Post-create/start scripts run as the resolved remote user.
            if let Some(post_create_command) = &config.post_create_command {
                for (command_name, command) in post_create_command.script_commands() {
                    log::debug!("Running post create command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
            if let Some(post_start_command) = &config.post_start_command {
                for (command_name, command) in post_start_command.script_commands() {
                    log::debug!("Running post start command {command_name}");
                    self.docker_client
                        .run_docker_exec(
                            &devcontainer_up.container_id,
                            &remote_folder,
                            &devcontainer_up.remote_user,
                            &devcontainer_up.remote_env,
                            command,
                        )
                        .await?;
                }
            }
        }
        // Attach scripts run on every attach, new container or not.
        if let Some(post_attach_command) = &config.post_attach_command {
            for (command_name, command) in post_attach_command.script_commands() {
                log::debug!("Running post attach command {command_name}");
                self.docker_client
                    .run_docker_exec(
                        &devcontainer_up.container_id,
                        &remote_folder,
                        &devcontainer_up.remote_user,
                        &devcontainer_up.remote_env,
                        command,
                    )
                    .await?;
            }
        }

        Ok(())
    }
1889
1890 async fn run_initialize_commands(&self) -> Result<(), DevContainerError> {
1891 let ConfigStatus::VariableParsed(config) = &self.config else {
1892 log::error!("Config not yet parsed, cannot proceed with initializeCommand");
1893 return Err(DevContainerError::DevContainerParseFailed);
1894 };
1895
1896 if let Some(initialize_command) = &config.initialize_command {
1897 log::debug!("Running initialize command");
1898 initialize_command
1899 .run(&self.command_runner, &self.local_project_directory)
1900 .await
1901 } else {
1902 log::warn!("No initialize command found");
1903 Ok(())
1904 }
1905 }
1906
1907 async fn check_for_existing_devcontainer(
1908 &self,
1909 ) -> Result<Option<DevContainerUp>, DevContainerError> {
1910 if let Some(docker_ps) = self.check_for_existing_container().await? {
1911 log::debug!("Dev container already found. Proceeding with it");
1912
1913 let docker_inspect = self.docker_client.inspect(&docker_ps.id).await?;
1914
1915 if !docker_inspect.is_running() {
1916 log::debug!("Container not running. Will attempt to start, and then proceed");
1917 self.docker_client.start_container(&docker_ps.id).await?;
1918 }
1919
1920 let remote_user = get_remote_user_from_config(&docker_inspect, self)?;
1921
1922 let remote_folder = get_remote_dir_from_config(
1923 &docker_inspect,
1924 (&self.local_project_directory.display()).to_string(),
1925 )?;
1926
1927 let remote_env = self.runtime_remote_env(&docker_inspect.config.env_as_map()?)?;
1928
1929 let dev_container_up = DevContainerUp {
1930 container_id: docker_ps.id,
1931 remote_user: remote_user,
1932 remote_workspace_folder: remote_folder,
1933 extension_ids: self.extension_ids(),
1934 remote_env,
1935 };
1936
1937 self.run_remote_scripts(&dev_container_up, false).await?;
1938
1939 Ok(Some(dev_container_up))
1940 } else {
1941 log::debug!("Existing container not found.");
1942
1943 Ok(None)
1944 }
1945 }
1946
1947 async fn check_for_existing_container(&self) -> Result<Option<DockerPs>, DevContainerError> {
1948 self.docker_client
1949 .find_process_by_filters(
1950 self.identifying_labels()
1951 .iter()
1952 .map(|(k, v)| format!("label={k}={v}"))
1953 .collect(),
1954 )
1955 .await
1956 }
1957
1958 fn project_name(&self) -> String {
1959 if let Some(name) = &self.dev_container().name {
1960 safe_id_lower(name)
1961 } else {
1962 let alternate_name = &self
1963 .local_workspace_base_name()
1964 .unwrap_or(self.local_workspace_folder());
1965 safe_id_lower(alternate_name)
1966 }
1967 }
1968}
1969
/// Holds all the information needed to construct a `docker buildx build` command
/// that extends a base image with dev container features.
///
/// This mirrors the `ImageBuildOptions` interface in the CLI reference implementation
/// (cli/src/spec-node/containerFeatures.ts).
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct FeaturesBuildInfo {
    /// Path to the generated Dockerfile.extended
    pub dockerfile_path: PathBuf,
    /// Path to the features content directory (used as a BuildKit build context)
    pub features_content_dir: PathBuf,
    /// Path to an empty directory used as the Docker build context, so the
    /// build doesn't pull in unrelated files
    pub empty_context_dir: PathBuf,
    /// The base image name (e.g. "mcr.microsoft.com/devcontainers/rust:2-1-bookworm");
    /// `None` when the Dockerfile supplies its own base stage
    pub build_image: Option<String>,
    /// The tag to apply to the built image (e.g. "vsc-myproject-features")
    pub image_tag: String,
}
1988
1989pub(crate) async fn read_devcontainer_configuration(
1990 config: DevContainerConfig,
1991 context: &DevContainerContext,
1992 environment: HashMap<String, String>,
1993) -> Result<DevContainer, DevContainerError> {
1994 let docker = if context.use_podman {
1995 Docker::new("podman")
1996 } else {
1997 Docker::new("docker")
1998 };
1999 let mut dev_container = DevContainerManifest::new(
2000 context,
2001 environment,
2002 Arc::new(docker),
2003 Arc::new(DefaultCommandRunner::new()),
2004 config,
2005 &context.project_directory.as_ref(),
2006 )
2007 .await?;
2008 dev_container.parse_nonremote_vars()?;
2009 Ok(dev_container.dev_container().clone())
2010}
2011
2012pub(crate) async fn spawn_dev_container(
2013 context: &DevContainerContext,
2014 environment: HashMap<String, String>,
2015 config: DevContainerConfig,
2016 local_project_path: &Path,
2017) -> Result<DevContainerUp, DevContainerError> {
2018 let docker = if context.use_podman {
2019 Docker::new("podman")
2020 } else {
2021 Docker::new("docker")
2022 };
2023 let mut devcontainer_manifest = DevContainerManifest::new(
2024 context,
2025 environment,
2026 Arc::new(docker),
2027 Arc::new(DefaultCommandRunner::new()),
2028 config,
2029 local_project_path,
2030 )
2031 .await?;
2032
2033 devcontainer_manifest.parse_nonremote_vars()?;
2034
2035 log::debug!("Checking for existing container");
2036 if let Some(devcontainer) = devcontainer_manifest
2037 .check_for_existing_devcontainer()
2038 .await?
2039 {
2040 Ok(devcontainer)
2041 } else {
2042 log::debug!("Existing container not found. Building");
2043
2044 devcontainer_manifest.build_and_run().await
2045 }
2046}
2047
/// Inputs for launching a single-image (non-compose) dev container.
#[derive(Debug)]
struct DockerBuildResources {
    // Inspect data for the image the container will be started from.
    image: DockerInspect,
    // Mounts beyond the primary workspace bind mount.
    additional_mounts: Vec<MountDefinition>,
    // NOTE(review): presumably maps to docker's `--privileged` flag — confirm
    // in the run-command construction.
    privileged: bool,
    // Shell script handed to the container's entrypoint to keep it alive.
    entrypoint_script: String,
}
2055
/// Discriminates the two supported build strategies: a docker-compose project
/// versus a single image started with `docker run`.
#[derive(Debug)]
enum DevContainerBuildResources {
    DockerCompose(DockerComposeResources),
    Docker(DockerBuildResources),
}
2061
2062fn find_primary_service(
2063 docker_compose: &DockerComposeResources,
2064 devcontainer: &DevContainerManifest,
2065) -> Result<(String, DockerComposeService), DevContainerError> {
2066 let Some(service_name) = &devcontainer.dev_container().service else {
2067 return Err(DevContainerError::DevContainerParseFailed);
2068 };
2069
2070 match docker_compose.config.services.get(service_name) {
2071 Some(service) => Ok((service_name.clone(), service.clone())),
2072 None => Err(DevContainerError::DevContainerParseFailed),
2073 }
2074}
2075
/// Destination folder inside the container where feature content is staged during build.
/// Mirrors the CLI's `FEATURES_CONTAINER_TEMP_DEST_FOLDER`.
/// NOTE(review): presumably referenced by the generated Dockerfile.extended
/// when copying feature install scripts — confirm at the build site.
const FEATURES_CONTAINER_TEMP_DEST_FOLDER: &str = "/tmp/dev-container-features";
2079
/// Escapes regex special characters in a string.
///
/// Every occurrence of a character from `.*+?^${}()|[]\` is prefixed with a
/// backslash; all other characters pass through unchanged.
fn escape_regex_chars(input: &str) -> String {
    input
        .chars()
        .flat_map(|ch| {
            let needs_escape = ".*+?^${}()|[]\\".contains(ch);
            needs_escape
                .then_some('\\')
                .into_iter()
                .chain(std::iter::once(ch))
        })
        .collect()
}
2091
/// Extracts the short feature ID from a full feature reference string.
///
/// Examples:
/// - `ghcr.io/devcontainers/features/aws-cli:1` → `aws-cli`
/// - `ghcr.io/user/repo/go` → `go`
/// - `ghcr.io/devcontainers/features/rust@sha256:abc` → `rust`
/// - `./myFeature` → `myFeature`
fn extract_feature_id(feature_ref: &str) -> &str {
    // Strip a digest suffix (`@sha256:…`) or a version tag (`:1`). A colon is
    // only a tag separator when it appears after the last slash.
    let without_version = match feature_ref.rfind('@') {
        Some(at_idx) => &feature_ref[..at_idx],
        None => match (feature_ref.rfind('/'), feature_ref.rfind(':')) {
            (Some(slash), Some(colon)) if colon > slash => &feature_ref[..colon],
            _ => feature_ref,
        },
    };
    // The short ID is the final path segment.
    without_version
        .rsplit('/')
        .next()
        .unwrap_or(without_version)
}
2115
2116/// Generates a shell command that looks up a user's passwd entry.
2117///
2118/// Mirrors the CLI's `getEntPasswdShellCommand` in `commonUtils.ts`.
2119/// Tries `getent passwd` first, then falls back to grepping `/etc/passwd`.
2120fn get_ent_passwd_shell_command(user: &str) -> String {
2121 let escaped_for_shell = user.replace('\\', "\\\\").replace('\'', "\\'");
2122 let escaped_for_regex = escape_regex_chars(user).replace('\'', "\\'");
2123 format!(
2124 " (command -v getent >/dev/null 2>&1 && getent passwd '{shell}' || grep -E '^{re}|^[^:]*:[^:]*:{re}:' /etc/passwd || true)",
2125 shell = escaped_for_shell,
2126 re = escaped_for_regex,
2127 )
2128}
2129
2130/// Determines feature installation order, respecting `overrideFeatureInstallOrder`.
2131///
2132/// Features listed in the override come first (in the specified order), followed
2133/// by any remaining features sorted lexicographically by their full reference ID.
2134fn resolve_feature_order<'a>(
2135 features: &'a HashMap<String, FeatureOptions>,
2136 override_order: &Option<Vec<String>>,
2137) -> Vec<(&'a String, &'a FeatureOptions)> {
2138 if let Some(order) = override_order {
2139 let mut ordered: Vec<(&'a String, &'a FeatureOptions)> = Vec::new();
2140 for ordered_id in order {
2141 if let Some((key, options)) = features.get_key_value(ordered_id) {
2142 ordered.push((key, options));
2143 }
2144 }
2145 let mut remaining: Vec<_> = features
2146 .iter()
2147 .filter(|(id, _)| !order.iter().any(|o| o == *id))
2148 .collect();
2149 remaining.sort_by_key(|(id, _)| id.as_str());
2150 ordered.extend(remaining);
2151 ordered
2152 } else {
2153 let mut entries: Vec<_> = features.iter().collect();
2154 entries.sort_by_key(|(id, _)| id.as_str());
2155 entries
2156 }
2157}
2158
/// Generates the `devcontainer-features-install.sh` wrapper script for one feature.
///
/// Mirrors the CLI's `getFeatureInstallWrapperScript` in
/// `containerFeaturesConfiguration.ts`.
///
/// `feature_ref` is the full OCI reference, `feature_id` the short feature
/// name, and `env_variables` a newline-separated option listing that the
/// generated script echoes (but does not execute).
fn generate_install_wrapper(
    feature_ref: &str,
    feature_id: &str,
    env_variables: &str,
) -> Result<String, DevContainerError> {
    // Shell-quote every interpolated value so it is safe inside the script.
    let escaped_id = shlex::try_quote(feature_ref).map_err(|e| {
        log::error!("Error escaping feature ref {feature_ref}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    let escaped_name = shlex::try_quote(feature_id).map_err(|e| {
        log::error!("Error escaping feature {feature_id}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;
    // Indent each non-empty option line so the banner echo reads cleanly.
    let options_indented: String = env_variables
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| format!(" {}", l))
        .collect::<Vec<_>>()
        .join("\n");
    let escaped_options = shlex::try_quote(&options_indented).map_err(|e| {
        log::error!("Error escaping options {options_indented}: {e}");
        DevContainerError::DevContainerParseFailed
    })?;

    // The wrapper traps EXIT to report install failures, prints a banner,
    // `set -a`-sources the generated env files, then runs the feature's
    // install.sh. The `{{`/`}}` below are literal braces escaped for format!.
    let script = format!(
        r#"#!/bin/sh
set -e

on_exit () {{
    [ $? -eq 0 ] && exit
    echo 'ERROR: Feature "{escaped_name}" ({escaped_id}) failed to install!'
}}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : {escaped_name}'
echo 'Id : {escaped_id}'
echo 'Options :'
echo {escaped_options}
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
    );

    Ok(script)
}
2217
2218// Dockerfile actions need to be moved to their own file
/// Returns the build-stage alias (`FROM <image> AS <alias>`) of the first
/// `FROM` line in a Dockerfile, if one is present.
///
/// Tokenizes with `split_whitespace` so repeated spaces or tabs between
/// `AS` and the alias are tolerated; the `AS` keyword is matched
/// case-insensitively without allocating.
fn dockerfile_alias(dockerfile_content: &str) -> Option<String> {
    let from_line = dockerfile_content
        .lines()
        .find(|line| line.starts_with("FROM"))?;
    let words: Vec<&str> = from_line.split_whitespace().collect();
    if words.len() > 2 && words[words.len() - 2].eq_ignore_ascii_case("as") {
        Some(words[words.len() - 1].to_string())
    } else {
        None
    }
}
2232
2233fn dockerfile_inject_alias(dockerfile_content: &str, alias: &str) -> String {
2234 if dockerfile_alias(dockerfile_content).is_some() {
2235 dockerfile_content.to_string()
2236 } else {
2237 dockerfile_content
2238 .lines()
2239 .map(|line| {
2240 if line.starts_with("FROM") {
2241 format!("{} AS {}", line, alias)
2242 } else {
2243 line.to_string()
2244 }
2245 })
2246 .collect::<Vec<String>>()
2247 .join("\n")
2248 }
2249}
2250
2251fn image_from_dockerfile(
2252 devcontainer: &DevContainerManifest,
2253 dockerfile_contents: String,
2254) -> Result<String, DevContainerError> {
2255 let mut raw_contents = dockerfile_contents
2256 .lines()
2257 .find(|line| line.starts_with("FROM"))
2258 .and_then(|from_line| {
2259 from_line
2260 .split(' ')
2261 .collect::<Vec<&str>>()
2262 .get(1)
2263 .map(|s| s.to_string())
2264 })
2265 .ok_or_else(|| {
2266 log::error!("Could not find an image definition in dockerfile");
2267 DevContainerError::DevContainerParseFailed
2268 })?;
2269
2270 for (k, v) in devcontainer
2271 .dev_container()
2272 .build
2273 .as_ref()
2274 .and_then(|b| b.args.as_ref())
2275 .unwrap_or(&HashMap::new())
2276 {
2277 raw_contents = raw_contents.replace(&format!("${{{}}}", k), v);
2278 }
2279 Ok(raw_contents)
2280}
2281
2282// Container user things
2283// This should come from spec - see the docs
2284fn get_remote_user_from_config(
2285 docker_config: &DockerInspect,
2286 devcontainer: &DevContainerManifest,
2287) -> Result<String, DevContainerError> {
2288 if let DevContainer {
2289 remote_user: Some(user),
2290 ..
2291 } = &devcontainer.dev_container()
2292 {
2293 return Ok(user.clone());
2294 }
2295 let Some(metadata) = &docker_config.config.labels.metadata else {
2296 log::error!("Could not locate metadata");
2297 return Err(DevContainerError::ContainerNotValid(
2298 docker_config.id.clone(),
2299 ));
2300 };
2301 for metadatum in metadata {
2302 if let Some(remote_user) = metadatum.get("remoteUser") {
2303 if let Some(remote_user_str) = remote_user.as_str() {
2304 return Ok(remote_user_str.to_string());
2305 }
2306 }
2307 }
2308 log::error!("Could not locate the remote user");
2309 Err(DevContainerError::ContainerNotValid(
2310 docker_config.id.clone(),
2311 ))
2312}
2313
2314// This should come from spec - see the docs
2315fn get_container_user_from_config(
2316 docker_config: &DockerInspect,
2317 devcontainer: &DevContainerManifest,
2318) -> Result<String, DevContainerError> {
2319 if let Some(user) = &devcontainer.dev_container().container_user {
2320 return Ok(user.to_string());
2321 }
2322 if let Some(metadata) = &docker_config.config.labels.metadata {
2323 for metadatum in metadata {
2324 if let Some(container_user) = metadatum.get("containerUser") {
2325 if let Some(container_user_str) = container_user.as_str() {
2326 return Ok(container_user_str.to_string());
2327 }
2328 }
2329 }
2330 }
2331 if let Some(image_user) = &docker_config.config.image_user {
2332 return Ok(image_user.to_string());
2333 }
2334
2335 Err(DevContainerError::DevContainerParseFailed)
2336}
2337
2338#[cfg(test)]
2339mod test {
2340 use std::{
2341 collections::HashMap,
2342 ffi::OsStr,
2343 path::PathBuf,
2344 process::{ExitStatus, Output},
2345 sync::{Arc, Mutex},
2346 };
2347
2348 use async_trait::async_trait;
2349 use fs::{FakeFs, Fs};
2350 use gpui::{AppContext, TestAppContext};
2351 use http_client::{AsyncBody, FakeHttpClient, HttpClient};
2352 use project::{
2353 ProjectEnvironment,
2354 worktree_store::{WorktreeIdCounter, WorktreeStore},
2355 };
2356 use serde_json_lenient::Value;
2357 use util::{command::Command, paths::SanitizedPath};
2358
2359 use crate::{
2360 DevContainerConfig, DevContainerContext,
2361 command_json::CommandRunner,
2362 devcontainer_api::DevContainerError,
2363 devcontainer_json::MountDefinition,
2364 devcontainer_manifest::{
2365 ConfigStatus, DevContainerManifest, DockerBuildResources, DockerComposeResources,
2366 DockerInspect, extract_feature_id, find_primary_service, get_remote_user_from_config,
2367 },
2368 docker::{
2369 DockerClient, DockerComposeConfig, DockerComposeService, DockerComposeServiceBuild,
2370 DockerComposeVolume, DockerConfigLabels, DockerInspectConfig, DockerInspectMount,
2371 DockerPs,
2372 },
2373 oci::TokenResponse,
2374 };
    /// Canonical fake project root used by these tests; the final path
    /// component ("project") doubles as the expected workspace base name.
    const TEST_PROJECT_PATH: &str = "/path/to/local/project";
2376
2377 async fn build_tarball(content: Vec<(&str, &str)>) -> Vec<u8> {
2378 let buffer = futures::io::Cursor::new(Vec::new());
2379 let mut builder = async_tar::Builder::new(buffer);
2380 for (file_name, content) in content {
2381 if content.is_empty() {
2382 let mut header = async_tar::Header::new_gnu();
2383 header.set_size(0);
2384 header.set_mode(0o755);
2385 header.set_entry_type(async_tar::EntryType::Directory);
2386 header.set_cksum();
2387 builder
2388 .append_data(&mut header, file_name, &[] as &[u8])
2389 .await
2390 .unwrap();
2391 } else {
2392 let data = content.as_bytes();
2393 let mut header = async_tar::Header::new_gnu();
2394 header.set_size(data.len() as u64);
2395 header.set_mode(0o755);
2396 header.set_entry_type(async_tar::EntryType::Regular);
2397 header.set_cksum();
2398 builder
2399 .append_data(&mut header, file_name, data)
2400 .await
2401 .unwrap();
2402 }
2403 }
2404 let buffer = builder.into_inner().await.unwrap();
2405 buffer.into_inner()
2406 }
2407
2408 fn test_project_filename() -> String {
2409 PathBuf::from(TEST_PROJECT_PATH)
2410 .file_name()
2411 .expect("is valid")
2412 .display()
2413 .to_string()
2414 }
2415
2416 async fn init_devcontainer_config(
2417 fs: &Arc<FakeFs>,
2418 devcontainer_contents: &str,
2419 ) -> DevContainerConfig {
2420 fs.insert_tree(
2421 format!("{TEST_PROJECT_PATH}/.devcontainer"),
2422 serde_json::json!({"devcontainer.json": devcontainer_contents}),
2423 )
2424 .await;
2425
2426 DevContainerConfig::default_config()
2427 }
2428
    /// Bundle of fakes handed back to each test so it can inspect side
    /// effects (filesystem writes, docker invocations, executed commands)
    /// after exercising the manifest.
    struct TestDependencies {
        fs: Arc<FakeFs>,
        // Kept alive (underscore-prefixed) for tests that only need the
        // client to exist, not to inspect it.
        _http_client: Arc<dyn HttpClient>,
        docker: Arc<FakeDocker>,
        command_runner: Arc<TestCommandRunner>,
    }
2435
2436 async fn init_default_devcontainer_manifest(
2437 cx: &mut TestAppContext,
2438 devcontainer_contents: &str,
2439 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2440 let fs = FakeFs::new(cx.executor());
2441 let http_client = fake_http_client();
2442 let command_runner = Arc::new(TestCommandRunner::new());
2443 let docker = Arc::new(FakeDocker::new());
2444 let environment = HashMap::new();
2445
2446 init_devcontainer_manifest(
2447 cx,
2448 fs,
2449 http_client,
2450 docker,
2451 command_runner,
2452 environment,
2453 devcontainer_contents,
2454 )
2455 .await
2456 }
2457
2458 async fn init_devcontainer_manifest(
2459 cx: &mut TestAppContext,
2460 fs: Arc<FakeFs>,
2461 http_client: Arc<dyn HttpClient>,
2462 docker_client: Arc<FakeDocker>,
2463 command_runner: Arc<TestCommandRunner>,
2464 environment: HashMap<String, String>,
2465 devcontainer_contents: &str,
2466 ) -> Result<(TestDependencies, DevContainerManifest), DevContainerError> {
2467 let local_config = init_devcontainer_config(&fs, devcontainer_contents).await;
2468 let project_path = SanitizedPath::new_arc(&PathBuf::from(TEST_PROJECT_PATH));
2469 let worktree_store =
2470 cx.new(|_cx| WorktreeStore::local(false, fs.clone(), WorktreeIdCounter::default()));
2471 let project_environment =
2472 cx.new(|cx| ProjectEnvironment::new(None, worktree_store.downgrade(), None, false, cx));
2473
2474 let context = DevContainerContext {
2475 project_directory: SanitizedPath::cast_arc(project_path),
2476 use_podman: false,
2477 fs: fs.clone(),
2478 http_client: http_client.clone(),
2479 environment: project_environment.downgrade(),
2480 };
2481
2482 let test_dependencies = TestDependencies {
2483 fs: fs.clone(),
2484 _http_client: http_client.clone(),
2485 docker: docker_client.clone(),
2486 command_runner: command_runner.clone(),
2487 };
2488 let manifest = DevContainerManifest::new(
2489 &context,
2490 environment,
2491 docker_client,
2492 command_runner,
2493 local_config,
2494 &PathBuf::from(TEST_PROJECT_PATH),
2495 )
2496 .await?;
2497
2498 Ok((test_dependencies, manifest))
2499 }
2500
    /// `remoteUser` set in devcontainer.json must win over the value stored
    /// in the container image's `devcontainer.metadata` label.
    #[gpui::test]
    async fn should_get_remote_user_from_devcontainer_if_available(cx: &mut TestAppContext) {
        let (_, devcontainer_manifest) = init_default_devcontainer_manifest(
            cx,
            r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "image",
    "remoteUser": "root",
}
        "#,
        )
        .await
        .unwrap();

        // Image metadata advertises a different user ("vsCode") to prove the
        // config value takes precedence.
        let mut metadata = HashMap::new();
        metadata.insert(
            "remoteUser".to_string(),
            serde_json_lenient::Value::String("vsCode".to_string()),
        );
        let given_docker_config = DockerInspect {
            id: "docker_id".to_string(),
            config: DockerInspectConfig {
                labels: DockerConfigLabels {
                    metadata: Some(vec![metadata]),
                },
                image_user: None,
                env: Vec::new(),
            },
            mounts: None,
            state: None,
        };

        let remote_user =
            get_remote_user_from_config(&given_docker_config, &devcontainer_manifest).unwrap();

        assert_eq!(remote_user, "root".to_string())
    }
2540
2541 #[gpui::test]
2542 async fn should_get_remote_user_from_docker_config(cx: &mut TestAppContext) {
2543 let (_, devcontainer_manifest) =
2544 init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2545 let mut metadata = HashMap::new();
2546 metadata.insert(
2547 "remoteUser".to_string(),
2548 serde_json_lenient::Value::String("vsCode".to_string()),
2549 );
2550 let given_docker_config = DockerInspect {
2551 id: "docker_id".to_string(),
2552 config: DockerInspectConfig {
2553 labels: DockerConfigLabels {
2554 metadata: Some(vec![metadata]),
2555 },
2556 image_user: None,
2557 env: Vec::new(),
2558 },
2559 mounts: None,
2560 state: None,
2561 };
2562
2563 let remote_user = get_remote_user_from_config(&given_docker_config, &devcontainer_manifest);
2564
2565 assert!(remote_user.is_ok());
2566 let remote_user = remote_user.expect("ok");
2567 assert_eq!(&remote_user, "vsCode")
2568 }
2569
2570 #[test]
2571 fn should_extract_feature_id_from_references() {
2572 assert_eq!(
2573 extract_feature_id("ghcr.io/devcontainers/features/aws-cli:1"),
2574 "aws-cli"
2575 );
2576 assert_eq!(
2577 extract_feature_id("ghcr.io/devcontainers/features/go"),
2578 "go"
2579 );
2580 assert_eq!(extract_feature_id("ghcr.io/user/repo/node:18.0.0"), "node");
2581 assert_eq!(extract_feature_id("./myFeature"), "myFeature");
2582 assert_eq!(
2583 extract_feature_id("ghcr.io/devcontainers/features/rust@sha256:abc123"),
2584 "rust"
2585 );
2586 }
2587
2588 #[gpui::test]
2589 async fn should_create_correct_docker_run_command(cx: &mut TestAppContext) {
2590 let mut metadata = HashMap::new();
2591 metadata.insert(
2592 "remoteUser".to_string(),
2593 serde_json_lenient::Value::String("vsCode".to_string()),
2594 );
2595
2596 let (_, devcontainer_manifest) =
2597 init_default_devcontainer_manifest(cx, "{}").await.unwrap();
2598 let build_resources = DockerBuildResources {
2599 image: DockerInspect {
2600 id: "mcr.microsoft.com/devcontainers/base:ubuntu".to_string(),
2601 config: DockerInspectConfig {
2602 labels: DockerConfigLabels { metadata: None },
2603 image_user: None,
2604 env: Vec::new(),
2605 },
2606 mounts: None,
2607 state: None,
2608 },
2609 additional_mounts: vec![],
2610 privileged: false,
2611 entrypoint_script: "echo Container started\n trap \"exit 0\" 15\n exec \"$@\"\n while sleep 1 & wait $!; do :; done".to_string(),
2612 };
2613 let docker_run_command = devcontainer_manifest.create_docker_run_command(build_resources);
2614
2615 assert!(docker_run_command.is_ok());
2616 let docker_run_command = docker_run_command.expect("ok");
2617
2618 assert_eq!(docker_run_command.get_program(), "docker");
2619 let expected_config_file_label = PathBuf::from(TEST_PROJECT_PATH)
2620 .join(".devcontainer")
2621 .join("devcontainer.json");
2622 let expected_config_file_label = expected_config_file_label.display();
2623 assert_eq!(
2624 docker_run_command.get_args().collect::<Vec<&OsStr>>(),
2625 vec![
2626 OsStr::new("run"),
2627 OsStr::new("--sig-proxy=false"),
2628 OsStr::new("-d"),
2629 OsStr::new("--mount"),
2630 OsStr::new(
2631 "type=bind,source=/path/to/local/project,target=/workspaces/project,consistency=cached"
2632 ),
2633 OsStr::new("-l"),
2634 OsStr::new("devcontainer.local_folder=/path/to/local/project"),
2635 OsStr::new("-l"),
2636 OsStr::new(&format!(
2637 "devcontainer.config_file={expected_config_file_label}"
2638 )),
2639 OsStr::new("--entrypoint"),
2640 OsStr::new("/bin/sh"),
2641 OsStr::new("mcr.microsoft.com/devcontainers/base:ubuntu"),
2642 OsStr::new("-c"),
2643 OsStr::new(
2644 "
2645 echo Container started
2646 trap \"exit 0\" 15
2647 exec \"$@\"
2648 while sleep 1 & wait $!; do :; done
2649 "
2650 .trim()
2651 ),
2652 OsStr::new("-"),
2653 ]
2654 )
2655 }
2656
    /// `find_primary_service` should fail when no service is configured or
    /// when the configured service is missing from the compose config, and
    /// succeed when the named service exists.
    #[gpui::test]
    async fn should_find_primary_service_in_docker_compose(cx: &mut TestAppContext) {
        // State where service not defined in dev container
        let (_, given_dev_container) = init_default_devcontainer_manifest(cx, "{}").await.unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());

        // State where service defined in devcontainer, not found in DockerCompose config
        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "not_found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::new(),
                ..Default::default()
            },
            ..Default::default()
        };

        let bad_result = find_primary_service(&given_docker_compose_config, &given_dev_container);

        assert!(bad_result.is_err());
        // State where service defined in devcontainer and in DockerCompose config

        let (_, given_dev_container) =
            init_default_devcontainer_manifest(cx, r#"{"service": "found_service"}"#)
                .await
                .unwrap();
        let given_docker_compose_config = DockerComposeResources {
            config: DockerComposeConfig {
                name: Some("devcontainers".to_string()),
                services: HashMap::from([(
                    "found_service".to_string(),
                    DockerComposeService {
                        ..Default::default()
                    },
                )]),
                ..Default::default()
            },
            ..Default::default()
        };

        let (service_name, _) =
            find_primary_service(&given_docker_compose_config, &given_dev_container).unwrap();

        assert_eq!(service_name, "found_service".to_string());
    }
2716
    /// End-to-end check of non-remote variable substitution when the default
    /// workspace mount (`/workspaces/<basename>`) is in effect, including
    /// `${localEnv:…}` lookups against a provided environment.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_default_mount(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.executor());
        let given_devcontainer_contents = r#"
// These are some external comments. serde_lenient should handle them
{
    // These are some internal comments
    "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
    "name": "myDevContainer-${devcontainerId}",
    "remoteUser": "root",
    "remoteEnv": {
        "DEVCONTAINER_ID": "${devcontainerId}",
        "MYVAR2": "myvarothervalue",
        "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
        "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
        "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
        "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}",
        "LOCAL_ENV_VAR_1": "${localEnv:local_env_1}",
        "LOCAL_ENV_VAR_2": "${localEnv:my_other_env}"

    }
}
        "#;
        // The two local env vars below feed the `${localEnv:…}` lookups.
        let (_, mut devcontainer_manifest) = init_devcontainer_manifest(
            cx,
            fs,
            fake_http_client(),
            Arc::new(FakeDocker::new()),
            Arc::new(TestCommandRunner::new()),
            HashMap::from([
                ("local_env_1".to_string(), "local_env_value1".to_string()),
                ("my_other_env".to_string(), "THISVALUEHERE".to_string()),
            ]),
            given_devcontainer_contents,
        )
        .await
        .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&test_project_filename())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&format!("/workspaces/{}", test_project_filename()))
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );

        // ${localEnv:VARIABLE_NAME}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_1")),
            Some(&"local_env_value1".to_string())
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_ENV_VAR_2")),
            Some(&"THISVALUEHERE".to_string())
        );
    }
2829
    /// Variable substitution should honor an explicit `workspaceMount` /
    /// `workspaceFolder` pair instead of the default `/workspaces/<basename>`
    /// layout.
    #[gpui::test]
    async fn test_nonremote_variable_replacement_with_explicit_mount(cx: &mut TestAppContext) {
        let given_devcontainer_contents = r#"
        // These are some external comments. serde_lenient should handle them
        {
            // These are some internal comments
            "image": "mcr.microsoft.com/devcontainers/base:ubuntu",
            "name": "myDevContainer-${devcontainerId}",
            "remoteUser": "root",
            "remoteEnv": {
                "DEVCONTAINER_ID": "${devcontainerId}",
                "MYVAR2": "myvarothervalue",
                "REMOTE_WORKSPACE_FOLDER_BASENAME": "${containerWorkspaceFolderBasename}",
                "LOCAL_WORKSPACE_FOLDER_BASENAME": "${localWorkspaceFolderBasename}",
                "REMOTE_WORKSPACE_FOLDER": "${containerWorkspaceFolder}",
                "LOCAL_WORKSPACE_FOLDER": "${localWorkspaceFolder}"

            },
            "workspaceMount": "source=/local/folder,target=/workspace/subfolder,type=bind,consistency=cached",
            "workspaceFolder": "/workspace/customfolder"
        }
        "#;

        let (_, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let ConfigStatus::VariableParsed(variable_replaced_devcontainer) =
            &devcontainer_manifest.config
        else {
            panic!("Config not parsed");
        };

        // ${devcontainerId}
        let devcontainer_id = devcontainer_manifest.devcontainer_id();
        assert_eq!(
            variable_replaced_devcontainer.name,
            Some(format!("myDevContainer-{devcontainer_id}"))
        );
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("DEVCONTAINER_ID")),
            Some(&devcontainer_id)
        );

        // ${containerWorkspaceFolderBasename} — basename of the explicit
        // workspaceFolder, not of the local project.
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER_BASENAME")),
            Some(&"customfolder".to_string())
        );

        // ${localWorkspaceFolderBasename}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER_BASENAME")),
            Some(&"project".to_string())
        );

        // ${containerWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("REMOTE_WORKSPACE_FOLDER")),
            Some(&"/workspace/customfolder".to_string())
        );

        // ${localWorkspaceFolder}
        assert_eq!(
            variable_replaced_devcontainer
                .remote_env
                .as_ref()
                .and_then(|env| env.get("LOCAL_WORKSPACE_FOLDER")),
            Some(&TEST_PROJECT_PATH.to_string())
        );
    }
2916
2917 // updateRemoteUserUID is treated as false in Windows, so this test will fail
2918 // It is covered by test_spawns_devcontainer_with_dockerfile_and_no_update_uid
2919 #[cfg(not(target_os = "windows"))]
2920 #[gpui::test]
2921 async fn test_spawns_devcontainer_with_dockerfile_and_features(cx: &mut TestAppContext) {
2922 cx.executor().allow_parking();
2923 env_logger::try_init().ok();
2924 let given_devcontainer_contents = r#"
2925 /*---------------------------------------------------------------------------------------------
2926 * Copyright (c) Microsoft Corporation. All rights reserved.
2927 * Licensed under the MIT License. See License.txt in the project root for license information.
2928 *--------------------------------------------------------------------------------------------*/
2929 {
2930 "name": "cli-${devcontainerId}",
2931 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
2932 "build": {
2933 "dockerfile": "Dockerfile",
2934 "args": {
2935 "VARIANT": "18-bookworm",
2936 "FOO": "bar",
2937 },
2938 },
2939 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
2940 "workspaceFolder": "/workspace2",
2941 "mounts": [
2942 // Keep command history across instances
2943 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
2944 ],
2945
2946 "forwardPorts": [
2947 8082,
2948 8083,
2949 ],
2950 "appPort": "8084",
2951
2952 "containerEnv": {
2953 "VARIABLE_VALUE": "value",
2954 },
2955
2956 "initializeCommand": "touch IAM.md",
2957
2958 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",
2959
2960 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",
2961
2962 "postCreateCommand": {
2963 "yarn": "yarn install",
2964 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2965 },
2966
2967 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",
2968
2969 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",
2970
2971 "remoteUser": "node",
2972
2973 "remoteEnv": {
2974 "PATH": "${containerEnv:PATH}:/some/other/path",
2975 "OTHER_ENV": "other_env_value"
2976 },
2977
2978 "features": {
2979 "ghcr.io/devcontainers/features/docker-in-docker:2": {
2980 "moby": false,
2981 },
2982 "ghcr.io/devcontainers/features/go:1": {},
2983 },
2984
2985 "customizations": {
2986 "vscode": {
2987 "extensions": [
2988 "dbaeumer.vscode-eslint",
2989 "GitHub.vscode-pull-request-github",
2990 ],
2991 },
2992 "zed": {
2993 "extensions": ["vue", "ruby"],
2994 },
2995 "codespaces": {
2996 "repositories": {
2997 "devcontainers/features": {
2998 "permissions": {
2999 "contents": "write",
3000 "workflows": "write",
3001 },
3002 },
3003 },
3004 },
3005 },
3006 }
3007 "#;
3008
3009 let (test_dependencies, mut devcontainer_manifest) =
3010 init_default_devcontainer_manifest(cx, given_devcontainer_contents)
3011 .await
3012 .unwrap();
3013
3014 test_dependencies
3015 .fs
3016 .atomic_write(
3017 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3018 r#"
3019# Copyright (c) Microsoft Corporation. All rights reserved.
3020# Licensed under the MIT License. See License.txt in the project root for license information.
3021ARG VARIANT="16-bullseye"
3022FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}
3023
3024RUN mkdir -p /workspaces && chown node:node /workspaces
3025
3026ARG USERNAME=node
3027USER $USERNAME
3028
3029# Save command line history
3030RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3031&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3032&& mkdir -p /home/$USERNAME/commandhistory \
3033&& touch /home/$USERNAME/commandhistory/.bash_history \
3034&& chown -R $USERNAME /home/$USERNAME/commandhistory
3035 "#.trim().to_string(),
3036 )
3037 .await
3038 .unwrap();
3039
3040 devcontainer_manifest.parse_nonremote_vars().unwrap();
3041
3042 let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3043
3044 assert_eq!(
3045 devcontainer_up.extension_ids,
3046 vec!["vue".to_string(), "ruby".to_string()]
3047 );
3048
3049 let files = test_dependencies.fs.files();
3050 let feature_dockerfile = files
3051 .iter()
3052 .find(|f| {
3053 f.file_name()
3054 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3055 })
3056 .expect("to be found");
3057 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3058 assert_eq!(
3059 &feature_dockerfile,
3060 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3061
3062# Copyright (c) Microsoft Corporation. All rights reserved.
3063# Licensed under the MIT License. See License.txt in the project root for license information.
3064ARG VARIANT="16-bullseye"
3065FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label
3066
3067RUN mkdir -p /workspaces && chown node:node /workspaces
3068
3069ARG USERNAME=node
3070USER $USERNAME
3071
3072# Save command line history
3073RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
3074&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
3075&& mkdir -p /home/$USERNAME/commandhistory \
3076&& touch /home/$USERNAME/commandhistory/.bash_history \
3077&& chown -R $USERNAME /home/$USERNAME/commandhistory
3078
3079FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3080USER root
3081COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
3082RUN chmod -R 0755 /tmp/build-features/
3083
3084FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3085
3086USER root
3087
3088RUN mkdir -p /tmp/dev-container-features
3089COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3090
3091RUN \
3092echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3093echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3094
3095
3096RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
3097cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
3098&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
3099&& cd /tmp/dev-container-features/docker-in-docker_0 \
3100&& chmod +x ./devcontainer-features-install.sh \
3101&& ./devcontainer-features-install.sh \
3102&& rm -rf /tmp/dev-container-features/docker-in-docker_0
3103
3104RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
3105cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
3106&& chmod -R 0755 /tmp/dev-container-features/go_1 \
3107&& cd /tmp/dev-container-features/go_1 \
3108&& chmod +x ./devcontainer-features-install.sh \
3109&& ./devcontainer-features-install.sh \
3110&& rm -rf /tmp/dev-container-features/go_1
3111
3112
3113ARG _DEV_CONTAINERS_IMAGE_USER=root
3114USER $_DEV_CONTAINERS_IMAGE_USER
3115"#
3116 );
3117
3118 let uid_dockerfile = files
3119 .iter()
3120 .find(|f| {
3121 f.file_name()
3122 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3123 })
3124 .expect("to be found");
3125 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3126
3127 assert_eq!(
3128 &uid_dockerfile,
3129 r#"ARG BASE_IMAGE
3130FROM $BASE_IMAGE
3131
3132USER root
3133
3134ARG REMOTE_USER
3135ARG NEW_UID
3136ARG NEW_GID
3137SHELL ["/bin/sh", "-c"]
3138RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3139 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3140 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3141 if [ -z "$OLD_UID" ]; then \
3142 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3143 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3144 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3145 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3146 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3147 else \
3148 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3149 FREE_GID=65532; \
3150 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3151 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3152 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3153 fi; \
3154 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3155 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3156 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3157 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3158 fi; \
3159 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3160 fi;
3161
3162ARG IMAGE_USER
3163USER $IMAGE_USER
3164
3165# Ensure that /etc/profile does not clobber the existing path
3166RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3167
3168ENV DOCKER_BUILDKIT=1
3169
3170ENV GOPATH=/go
3171ENV GOROOT=/usr/local/go
3172ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
3173ENV VARIABLE_VALUE=value
3174"#
3175 );
3176
3177 let golang_install_wrapper = files
3178 .iter()
3179 .find(|f| {
3180 f.file_name()
3181 .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
3182 && f.to_str().is_some_and(|s| s.contains("/go_"))
3183 })
3184 .expect("to be found");
3185 let golang_install_wrapper = test_dependencies
3186 .fs
3187 .load(golang_install_wrapper)
3188 .await
3189 .unwrap();
3190 assert_eq!(
3191 &golang_install_wrapper,
3192 r#"#!/bin/sh
3193set -e
3194
3195on_exit () {
3196 [ $? -eq 0 ] && exit
3197 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
3198}
3199
3200trap on_exit EXIT
3201
3202echo ===========================================================================
3203echo 'Feature : go'
3204echo 'Id : ghcr.io/devcontainers/features/go:1'
3205echo 'Options :'
3206echo ' GOLANGCILINTVERSION=latest
3207 VERSION=latest'
3208echo ===========================================================================
3209
3210set -a
3211. ../devcontainer-features.builtin.env
3212. ./devcontainer-features.env
3213set +a
3214
3215chmod +x ./install.sh
3216./install.sh
3217"#
3218 );
3219
3220 let docker_commands = test_dependencies
3221 .command_runner
3222 .commands_by_program("docker");
3223
3224 let docker_run_command = docker_commands
3225 .iter()
3226 .find(|c| c.args.get(0).is_some_and(|a| a == "run"))
3227 .expect("found");
3228
3229 assert_eq!(
3230 docker_run_command.args,
3231 vec![
3232 "run".to_string(),
3233 "--privileged".to_string(),
3234 "--sig-proxy=false".to_string(),
3235 "-d".to_string(),
3236 "--mount".to_string(),
3237 "type=bind,source=/path/to/local/project,target=/workspace2,consistency=cached".to_string(),
3238 "--mount".to_string(),
3239 "type=volume,source=dev-containers-cli-bashhistory,target=/home/node/commandhistory,consistency=cached".to_string(),
3240 "--mount".to_string(),
3241 "type=volume,source=dind-var-lib-docker-42dad4b4ca7b8ced,target=/var/lib/docker,consistency=cached".to_string(),
3242 "-l".to_string(),
3243 "devcontainer.local_folder=/path/to/local/project".to_string(),
3244 "-l".to_string(),
3245 "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string(),
3246 "-l".to_string(),
3247 "devcontainer.metadata=[{\"remoteUser\":\"node\"}]".to_string(),
3248 "-p".to_string(),
3249 "8082:8082".to_string(),
3250 "-p".to_string(),
3251 "8083:8083".to_string(),
3252 "-p".to_string(),
3253 "8084:8084".to_string(),
3254 "--entrypoint".to_string(),
3255 "/bin/sh".to_string(),
3256 "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105".to_string(),
3257 "-c".to_string(),
3258 "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
3259 "-".to_string()
3260 ]
3261 );
3262
3263 let docker_exec_commands = test_dependencies
3264 .docker
3265 .exec_commands_recorded
3266 .lock()
3267 .unwrap();
3268
3269 assert!(docker_exec_commands.iter().all(|exec| {
3270 exec.env
3271 == HashMap::from([
3272 ("OTHER_ENV".to_string(), "other_env_value".to_string()),
3273 (
3274 "PATH".to_string(),
3275 "/initial/path:/some/other/path".to_string(),
3276 ),
3277 ])
3278 }))
3279 }
3280
    // updateRemoteUserUID is treated as false in Windows, so this test will fail
    // It is covered by test_spawns_devcontainer_with_docker_compose_and_no_update_uid
    #[cfg(not(target_os = "windows"))]
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose(cx: &mut TestAppContext) {
        // End-to-end test of `build_and_run` for a docker-compose based devcontainer
        // with two OCI features (aws-cli, docker-in-docker). It checks the three
        // generated artifacts: the feature-extended Dockerfile, the UID-sync
        // Dockerfile (updateRemoteUserUID defaults to true off-Windows), and the
        // docker-compose runtime override file.
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Write the compose file referenced by "dockerComposeFile" into the fake fs.
        // "app" shares the "db" service's network (network_mode: service:db), which
        // is why forwarded ports are expected to land on "db" in the override below.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
  postgres-data:

services:
  app:
    build:
      context: .
      dockerfile: Dockerfile
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    volumes:
      - ../..:/workspaces:cached

    # Overrides default command so things don't shut down after the process ends.
    command: sleep infinity

    # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
    network_mode: service:db

    # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)

  db:
    image: postgres:14.1
    restart: unless-stopped
    volumes:
      - postgres-data:/var/lib/postgresql/data
    env_file:
      # Ensure that the variables in .env match the same variables in devcontainer.json
      - .env

    # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
    # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile the "app" service builds from; its contents must be spliced
        // verbatim into the generated Dockerfile.extended asserted below.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variables (e.g. ${localWorkspaceFolderBasename}) before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The feature build should emit an extended Dockerfile: the user Dockerfile,
        // then the feature-content normalize stage, then one install stage per feature
        // (aws-cli_0, docker-in-docker_1) in declaration order.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    && apt-get -y install clang lld \
    && apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER
"#
        );

        // With updateRemoteUserUID left at its default (true, non-Windows), a second
        // Dockerfile is generated that rewrites the remote user's UID/GID to match the
        // host, and it carries the /etc/profile PATH fix and the feature ENV lines.
        let uid_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
            })
            .expect("to be found");
        let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();

        assert_eq!(
            &uid_dockerfile,
            r#"ARG BASE_IMAGE
FROM $BASE_IMAGE

USER root

ARG REMOTE_USER
ARG NEW_UID
ARG NEW_GID
SHELL ["/bin/sh", "-c"]
RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
    eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
    if [ -z "$OLD_UID" ]; then \
        echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
    elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
        echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
    elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
        echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
    else \
        if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
            FREE_GID=65532; \
            while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
            echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
            sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
        fi; \
        echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
        sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
        if [ "$OLD_GID" != "$NEW_GID" ]; then \
            sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
        fi; \
        chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
    fi;

ARG IMAGE_USER
USER $IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );

        // The runtime compose override should add the keep-alive entrypoint, labels,
        // privileged/cap/security settings, and the docker-in-docker volume to "app",
        // while all forwarded ports (including appPort) attach to "db" because "app"
        // runs with network_mode: service:db.
        let runtime_override = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "docker_compose_runtime.json")
            })
            .expect("to be found");
        let runtime_override = test_dependencies.fs.load(runtime_override).await.unwrap();

        let expected_runtime_override = DockerComposeConfig {
            name: None,
            services: HashMap::from([
                (
                    "app".to_string(),
                    DockerComposeService {
                        entrypoint: Some(vec![
                            "/bin/sh".to_string(),
                            "-c".to_string(),
                            "echo Container started\ntrap \"exit 0\" 15\n/usr/local/share/docker-init.sh\nexec \"$@\"\nwhile sleep 1 & wait $!; do :; done".to_string(),
                            "-".to_string(),
                        ]),
                        cap_add: Some(vec!["SYS_PTRACE".to_string()]),
                        security_opt: Some(vec!["seccomp=unconfined".to_string()]),
                        privileged: Some(true),
                        labels: Some(vec![
                            "devcontainer.metadata=[{\"remoteUser\":\"vscode\"}]".to_string(),
                            "devcontainer.local_folder=/path/to/local/project".to_string(),
                            "devcontainer.config_file=/path/to/local/project/.devcontainer/devcontainer.json".to_string()
                        ]),
                        volumes: vec![
                            MountDefinition {
                                source: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                                target: "/var/lib/docker".to_string(),
                                mount_type: Some("volume".to_string())
                            }
                        ],
                        ..Default::default()
                    },
                ),
                (
                    "db".to_string(),
                    DockerComposeService {
                        ports: vec![
                            "8083:8083".to_string(),
                            "5432:5432".to_string(),
                            "1234:1234".to_string(),
                            "8084:8084".to_string()
                        ],
                        ..Default::default()
                    },
                ),
            ]),
            volumes: HashMap::from([(
                "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                DockerComposeVolume {
                    name: "dind-var-lib-docker-42dad4b4ca7b8ced".to_string(),
                },
            )]),
        };

        // Compare structurally (not as raw text) so field ordering in the JSON is irrelevant.
        assert_eq!(
            serde_json_lenient::from_str::<DockerComposeConfig>(&runtime_override).unwrap(),
            expected_runtime_override
        )
    }
3570
    #[gpui::test]
    async fn test_spawns_devcontainer_with_docker_compose_and_no_update_uid(
        cx: &mut TestAppContext,
    ) {
        // Variant of the docker-compose test with "updateRemoteUserUID": false.
        // Unlike test_spawns_devcontainer_with_docker_compose, this has no
        // #[cfg(not(target_os = "windows"))] guard: with UID updating disabled the
        // behavior is the same on all platforms. The /etc/profile PATH fix and the
        // ENV line are expected at the end of Dockerfile.extended here, where the
        // guarded test put them in a separate updateUID.Dockerfile (this test only
        // asserts the extended Dockerfile).
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        let given_devcontainer_contents = r#"
        // For format details, see https://aka.ms/devcontainer.json. For config options, see the
        // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
        {
            "features": {
                "ghcr.io/devcontainers/features/aws-cli:1": {},
                "ghcr.io/devcontainers/features/docker-in-docker:2": {},
            },
            "name": "Rust and PostgreSQL",
            "dockerComposeFile": "docker-compose.yml",
            "service": "app",
            "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",

            // Features to add to the dev container. More info: https://containers.dev/features.
            // "features": {},

            // Use 'forwardPorts' to make a list of ports inside the container available locally.
            "forwardPorts": [
                8083,
                "db:5432",
                "db:1234",
            ],
            "updateRemoteUserUID": false,
            "appPort": "8084",

            // Use 'postCreateCommand' to run commands after the container is created.
            // "postCreateCommand": "rustc --version",

            // Configure tool-specific properties.
            // "customizations": {},

            // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
            // "remoteUser": "root"
        }
        "#;
        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // Compose file referenced by "dockerComposeFile". Note the flattened
        // indentation of this fixture (service names at column 0) differs from the
        // sibling test's fixture; the structure parsed from it is what matters here.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
                r#"
version: '3.8'

volumes:
postgres-data:

services:
app:
  build:
    context: .
    dockerfile: Dockerfile
  env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

  volumes:
    - ../..:/workspaces:cached

  # Overrides default command so things don't shut down after the process ends.
  command: sleep infinity

  # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
  network_mode: service:db

  # Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
  # (Adding the "ports" property to this file will not forward from a Codespace.)

db:
  image: postgres:14.1
  restart: unless-stopped
  volumes:
    - postgres-data:/var/lib/postgresql/data
  env_file:
    # Ensure that the variables in .env match the same variables in devcontainer.json
    - .env

  # Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
  # (Adding the "ports" property to this file will not forward from a Codespace.)
                "#.trim().to_string(),
            )
            .await
            .unwrap();

        // The Dockerfile the "app" service builds from; spliced verbatim into the
        // generated Dockerfile.extended asserted below.
        test_dependencies.fs.atomic_write(
            PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
            r#"
FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y
        "#.trim().to_string()).await.unwrap();

        // Resolve non-remote variables (e.g. ${localWorkspaceFolderBasename}) before building.
        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // The extended Dockerfile: user Dockerfile, feature normalize stage, one
        // install stage per feature, and — because UID updating is disabled — the
        // /etc/profile PATH fix and ENV DOCKER_BUILDKIT appended at the end.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label

# Include lld linker to improve build times either by using environment variable
# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install clang lld \
&& apt-get autoremove -y && apt-get clean -y

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./aws-cli_0,target=/tmp/build-features-src/aws-cli_0 \
cp -ar /tmp/build-features-src/aws-cli_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
&& cd /tmp/dev-container-features/aws-cli_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/aws-cli_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_1,target=/tmp/build-features-src/docker-in-docker_1 \
cp -ar /tmp/build-features-src/docker-in-docker_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
&& cd /tmp/dev-container-features/docker-in-docker_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true


ENV DOCKER_BUILDKIT=1
"#
        );
    }
3746
3747 #[cfg(not(target_os = "windows"))]
3748 #[gpui::test]
3749 async fn test_spawns_devcontainer_with_docker_compose_and_podman(cx: &mut TestAppContext) {
3750 cx.executor().allow_parking();
3751 env_logger::try_init().ok();
3752 let given_devcontainer_contents = r#"
3753 // For format details, see https://aka.ms/devcontainer.json. For config options, see the
3754 // README at: https://github.com/devcontainers/templates/tree/main/src/rust-postgres
3755 {
3756 "features": {
3757 "ghcr.io/devcontainers/features/aws-cli:1": {},
3758 "ghcr.io/devcontainers/features/docker-in-docker:2": {},
3759 },
3760 "name": "Rust and PostgreSQL",
3761 "dockerComposeFile": "docker-compose.yml",
3762 "service": "app",
3763 "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
3764
3765 // Features to add to the dev container. More info: https://containers.dev/features.
3766 // "features": {},
3767
3768 // Use 'forwardPorts' to make a list of ports inside the container available locally.
3769 // "forwardPorts": [5432],
3770
3771 // Use 'postCreateCommand' to run commands after the container is created.
3772 // "postCreateCommand": "rustc --version",
3773
3774 // Configure tool-specific properties.
3775 // "customizations": {},
3776
3777 // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
3778 // "remoteUser": "root"
3779 }
3780 "#;
3781 let mut fake_docker = FakeDocker::new();
3782 fake_docker.set_podman(true);
3783 let (test_dependencies, mut devcontainer_manifest) = init_devcontainer_manifest(
3784 cx,
3785 FakeFs::new(cx.executor()),
3786 fake_http_client(),
3787 Arc::new(fake_docker),
3788 Arc::new(TestCommandRunner::new()),
3789 HashMap::new(),
3790 given_devcontainer_contents,
3791 )
3792 .await
3793 .unwrap();
3794
3795 test_dependencies
3796 .fs
3797 .atomic_write(
3798 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/docker-compose.yml"),
3799 r#"
3800version: '3.8'
3801
3802volumes:
3803postgres-data:
3804
3805services:
3806app:
3807build:
3808 context: .
3809 dockerfile: Dockerfile
3810env_file:
3811 # Ensure that the variables in .env match the same variables in devcontainer.json
3812 - .env
3813
3814volumes:
3815 - ../..:/workspaces:cached
3816
3817# Overrides default command so things don't shut down after the process ends.
3818command: sleep infinity
3819
3820# Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function.
3821network_mode: service:db
3822
3823# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
3824# (Adding the "ports" property to this file will not forward from a Codespace.)
3825
3826db:
3827image: postgres:14.1
3828restart: unless-stopped
3829volumes:
3830 - postgres-data:/var/lib/postgresql/data
3831env_file:
3832 # Ensure that the variables in .env match the same variables in devcontainer.json
3833 - .env
3834
3835# Add "forwardPorts": ["5432"] to **devcontainer.json** to forward PostgreSQL locally.
3836# (Adding the "ports" property to this file will not forward from a Codespace.)
3837 "#.trim().to_string(),
3838 )
3839 .await
3840 .unwrap();
3841
3842 test_dependencies.fs.atomic_write(
3843 PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
3844 r#"
3845FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm
3846
3847# Include lld linker to improve build times either by using environment variable
3848# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3849RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3850&& apt-get -y install clang lld \
3851&& apt-get autoremove -y && apt-get clean -y
3852 "#.trim().to_string()).await.unwrap();
3853
3854 devcontainer_manifest.parse_nonremote_vars().unwrap();
3855
3856 let _devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();
3857
3858 let files = test_dependencies.fs.files();
3859
3860 let feature_dockerfile = files
3861 .iter()
3862 .find(|f| {
3863 f.file_name()
3864 .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
3865 })
3866 .expect("to be found");
3867 let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
3868 assert_eq!(
3869 &feature_dockerfile,
3870 r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder
3871
3872FROM mcr.microsoft.com/devcontainers/rust:2-1-bookworm AS dev_container_auto_added_stage_label
3873
3874# Include lld linker to improve build times either by using environment variable
3875# RUSTFLAGS="-C link-arg=-fuse-ld=lld" or with Cargo's configuration file (i.e see .cargo/config.toml).
3876RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
3877&& apt-get -y install clang lld \
3878&& apt-get autoremove -y && apt-get clean -y
3879
3880FROM dev_container_feature_content_temp as dev_containers_feature_content_source
3881
3882FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
3883USER root
3884COPY --from=dev_containers_feature_content_source /tmp/build-features/devcontainer-features.builtin.env /tmp/build-features/
3885RUN chmod -R 0755 /tmp/build-features/
3886
3887FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage
3888
3889USER root
3890
3891RUN mkdir -p /tmp/dev-container-features
3892COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
3893
3894RUN \
3895echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
3896echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'vscode' || grep -E '^vscode|^[^:]*:[^:]*:vscode:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
3897
3898
3899COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/aws-cli_0 /tmp/dev-container-features/aws-cli_0
3900RUN chmod -R 0755 /tmp/dev-container-features/aws-cli_0 \
3901&& cd /tmp/dev-container-features/aws-cli_0 \
3902&& chmod +x ./devcontainer-features-install.sh \
3903&& ./devcontainer-features-install.sh
3904
3905COPY --chown=root:root --from=dev_containers_feature_content_source /tmp/build-features/docker-in-docker_1 /tmp/dev-container-features/docker-in-docker_1
3906RUN chmod -R 0755 /tmp/dev-container-features/docker-in-docker_1 \
3907&& cd /tmp/dev-container-features/docker-in-docker_1 \
3908&& chmod +x ./devcontainer-features-install.sh \
3909&& ./devcontainer-features-install.sh
3910
3911
3912ARG _DEV_CONTAINERS_IMAGE_USER=root
3913USER $_DEV_CONTAINERS_IMAGE_USER
3914"#
3915 );
3916
3917 let uid_dockerfile = files
3918 .iter()
3919 .find(|f| {
3920 f.file_name()
3921 .is_some_and(|s| s.display().to_string() == "updateUID.Dockerfile")
3922 })
3923 .expect("to be found");
3924 let uid_dockerfile = test_dependencies.fs.load(uid_dockerfile).await.unwrap();
3925
3926 assert_eq!(
3927 &uid_dockerfile,
3928 r#"ARG BASE_IMAGE
3929FROM $BASE_IMAGE
3930
3931USER root
3932
3933ARG REMOTE_USER
3934ARG NEW_UID
3935ARG NEW_GID
3936SHELL ["/bin/sh", "-c"]
3937RUN eval $(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd); \
3938 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd); \
3939 eval $(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group); \
3940 if [ -z "$OLD_UID" ]; then \
3941 echo "Remote user not found in /etc/passwd ($REMOTE_USER)."; \
3942 elif [ "$OLD_UID" = "$NEW_UID" -a "$OLD_GID" = "$NEW_GID" ]; then \
3943 echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID)."; \
3944 elif [ "$OLD_UID" != "$NEW_UID" -a -n "$EXISTING_USER" ]; then \
3945 echo "User with UID exists ($EXISTING_USER=$NEW_UID)."; \
3946 else \
3947 if [ "$OLD_GID" != "$NEW_GID" -a -n "$EXISTING_GROUP" ]; then \
3948 FREE_GID=65532; \
3949 while grep -q ":[^:]*:${FREE_GID}:" /etc/group; do FREE_GID=$((FREE_GID - 1)); done; \
3950 echo "Reassigning group $EXISTING_GROUP from GID $NEW_GID to $FREE_GID."; \
3951 sed -i -e "s/\(${EXISTING_GROUP}:[^:]*:\)${NEW_GID}:/\1${FREE_GID}:/" /etc/group; \
3952 fi; \
3953 echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID."; \
3954 sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd; \
3955 if [ "$OLD_GID" != "$NEW_GID" ]; then \
3956 sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group; \
3957 fi; \
3958 chown -R $NEW_UID:$NEW_GID $HOME_FOLDER; \
3959 fi;
3960
3961ARG IMAGE_USER
3962USER $IMAGE_USER
3963
3964# Ensure that /etc/profile does not clobber the existing path
3965RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true
3966
3967
3968ENV DOCKER_BUILDKIT=1
3969"#
3970 );
3971 }
3972
    /// End-to-end build test for a Dockerfile-based devcontainer with
    /// `"updateRemoteUserUID": false`: the generated `Dockerfile.extended`
    /// must embed the user Dockerfile plus both feature install stages, the
    /// per-feature install wrapper must be rendered, a `docker run` must be
    /// issued, and every exec must carry the expanded `remoteEnv`.
    #[gpui::test]
    async fn test_spawns_devcontainer_with_dockerfile_and_no_update_uid(cx: &mut TestAppContext) {
        cx.executor().allow_parking();
        env_logger::try_init().ok();
        // JSONC devcontainer config: Dockerfile build with args, bind/volume
        // mounts, forwarded ports, lifecycle commands, two OCI features, and
        // editor customizations. UID rewriting is explicitly disabled.
        let given_devcontainer_contents = r#"
 /*---------------------------------------------------------------------------------------------
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/
 {
 "name": "cli-${devcontainerId}",
 // "image": "mcr.microsoft.com/devcontainers/typescript-node:16-bullseye",
 "build": {
 "dockerfile": "Dockerfile",
 "args": {
 "VARIANT": "18-bookworm",
 "FOO": "bar",
 },
 },
 "workspaceMount": "source=${localWorkspaceFolder},target=${containerWorkspaceFolder},type=bind,consistency=cached",
 "workspaceFolder": "/workspace2",
 "mounts": [
 // Keep command history across instances
 "source=dev-containers-cli-bashhistory,target=/home/node/commandhistory",
 ],

 "forwardPorts": [
 8082,
 8083,
 ],
 "appPort": "8084",
 "updateRemoteUserUID": false,

 "containerEnv": {
 "VARIABLE_VALUE": "value",
 },

 "initializeCommand": "touch IAM.md",

 "onCreateCommand": "echo 'onCreateCommand' >> ON_CREATE_COMMAND.md",

 "updateContentCommand": "echo 'updateContentCommand' >> UPDATE_CONTENT_COMMAND.md",

 "postCreateCommand": {
 "yarn": "yarn install",
 "debug": "echo 'postStartCommand' >> POST_START_COMMAND.md",
 },

 "postStartCommand": "echo 'postStartCommand' >> POST_START_COMMAND.md",

 "postAttachCommand": "echo 'postAttachCommand' >> POST_ATTACH_COMMAND.md",

 "remoteUser": "node",

 "remoteEnv": {
 "PATH": "${containerEnv:PATH}:/some/other/path",
 "OTHER_ENV": "other_env_value"
 },

 "features": {
 "ghcr.io/devcontainers/features/docker-in-docker:2": {
 "moby": false,
 },
 "ghcr.io/devcontainers/features/go:1": {},
 },

 "customizations": {
 "vscode": {
 "extensions": [
 "dbaeumer.vscode-eslint",
 "GitHub.vscode-pull-request-github",
 ],
 },
 "zed": {
 "extensions": ["vue", "ruby"],
 },
 "codespaces": {
 "repositories": {
 "devcontainers/features": {
 "permissions": {
 "contents": "write",
 "workflows": "write",
 },
 },
 },
 },
 },
 }
 "#;

        let (test_dependencies, mut devcontainer_manifest) =
            init_default_devcontainer_manifest(cx, given_devcontainer_contents)
                .await
                .unwrap();

        // The user Dockerfile referenced by the config's `build.dockerfile`;
        // its lines must be reproduced inside Dockerfile.extended below.
        test_dependencies
            .fs
            .atomic_write(
                PathBuf::from(TEST_PROJECT_PATH).join(".devcontainer/Dockerfile"),
                r#"
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT}

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory
 "#.trim().to_string(),
            )
            .await
            .unwrap();

        devcontainer_manifest.parse_nonremote_vars().unwrap();

        let devcontainer_up = devcontainer_manifest.build_and_run().await.unwrap();

        // Only the `zed` customization's extensions should surface.
        assert_eq!(
            devcontainer_up.extension_ids,
            vec!["vue".to_string(), "ruby".to_string()]
        );

        // Locate the generated Dockerfile.extended on the fake filesystem.
        let files = test_dependencies.fs.files();
        let feature_dockerfile = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "Dockerfile.extended")
            })
            .expect("to be found");
        let feature_dockerfile = test_dependencies.fs.load(feature_dockerfile).await.unwrap();
        // Expected layout: base-image arg, the user Dockerfile relabelled as
        // `dev_container_auto_added_stage_label`, normalize + target stages,
        // one RUN per feature (docker-in-docker then go), and the features'
        // containerEnv entries appended at the end.
        assert_eq!(
            &feature_dockerfile,
            r#"ARG _DEV_CONTAINERS_BASE_IMAGE=placeholder

# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
ARG VARIANT="16-bullseye"
FROM mcr.microsoft.com/devcontainers/typescript-node:1-${VARIANT} AS dev_container_auto_added_stage_label

RUN mkdir -p /workspaces && chown node:node /workspaces

ARG USERNAME=node
USER $USERNAME

# Save command line history
RUN echo "export HISTFILE=/home/$USERNAME/commandhistory/.bash_history" >> "/home/$USERNAME/.bashrc" \
&& echo "export PROMPT_COMMAND='history -a'" >> "/home/$USERNAME/.bashrc" \
&& mkdir -p /home/$USERNAME/commandhistory \
&& touch /home/$USERNAME/commandhistory/.bash_history \
&& chown -R $USERNAME /home/$USERNAME/commandhistory

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_feature_content_normalize
USER root
COPY --from=dev_containers_feature_content_source ./devcontainer-features.builtin.env /tmp/build-features/
RUN chmod -R 0755 /tmp/build-features/

FROM $_DEV_CONTAINERS_BASE_IMAGE AS dev_containers_target_stage

USER root

RUN mkdir -p /tmp/dev-container-features
COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features

RUN \
echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && \
echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env


RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 \
cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 \
&& cd /tmp/dev-container-features/docker-in-docker_0 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/docker-in-docker_0

RUN --mount=type=bind,from=dev_containers_feature_content_source,source=./go_1,target=/tmp/build-features-src/go_1 \
cp -ar /tmp/build-features-src/go_1 /tmp/dev-container-features \
&& chmod -R 0755 /tmp/dev-container-features/go_1 \
&& cd /tmp/dev-container-features/go_1 \
&& chmod +x ./devcontainer-features-install.sh \
&& ./devcontainer-features-install.sh \
&& rm -rf /tmp/dev-container-features/go_1


ARG _DEV_CONTAINERS_IMAGE_USER=root
USER $_DEV_CONTAINERS_IMAGE_USER

# Ensure that /etc/profile does not clobber the existing path
RUN sed -i -E 's/((^|\s)PATH=)([^\$]*)$/\1\${PATH:-\3}/g' /etc/profile || true

ENV DOCKER_BUILDKIT=1

ENV GOPATH=/go
ENV GOROOT=/usr/local/go
ENV PATH=/usr/local/go/bin:/go/bin:${PATH}
ENV VARIABLE_VALUE=value
"#
        );

        // The per-feature install wrapper generated for the `go` feature
        // (matched by path containing "go_" since each feature gets one).
        let golang_install_wrapper = files
            .iter()
            .find(|f| {
                f.file_name()
                    .is_some_and(|s| s.display().to_string() == "devcontainer-features-install.sh")
                    && f.to_str().is_some_and(|s| s.contains("go_"))
            })
            .expect("to be found");
        let golang_install_wrapper = test_dependencies
            .fs
            .load(golang_install_wrapper)
            .await
            .unwrap();
        assert_eq!(
            &golang_install_wrapper,
            r#"#!/bin/sh
set -e

on_exit () {
 [ $? -eq 0 ] && exit
 echo 'ERROR: Feature "go" (ghcr.io/devcontainers/features/go:1) failed to install!'
}

trap on_exit EXIT

echo ===========================================================================
echo 'Feature : go'
echo 'Id : ghcr.io/devcontainers/features/go:1'
echo 'Options :'
echo ' GOLANGCILINTVERSION=latest
 VERSION=latest'
echo ===========================================================================

set -a
. ../devcontainer-features.builtin.env
. ./devcontainer-features.env
set +a

chmod +x ./install.sh
./install.sh
"#
        );

        // A `docker run` must have been issued through the command runner.
        let docker_commands = test_dependencies
            .command_runner
            .commands_by_program("docker");

        let docker_run_command = docker_commands
            .iter()
            .find(|c| c.args.get(0).is_some_and(|a| a == "run"));

        assert!(docker_run_command.is_some());

        let docker_exec_commands = test_dependencies
            .docker
            .exec_commands_recorded
            .lock()
            .unwrap();

        // Every exec must carry the remoteEnv, with `${containerEnv:PATH}`
        // expanded from the image's `PATH=/initial/path` (see FakeDocker's
        // `cli_` inspect branch).
        assert!(docker_exec_commands.iter().all(|exec| {
            exec.env
                == HashMap::from([
                    ("OTHER_ENV".to_string(), "other_env_value".to_string()),
                    (
                        "PATH".to_string(),
                        "/initial/path:/some/other/path".to_string(),
                    ),
                ])
        }))
    }
4252
    /// One `docker exec` invocation captured by [`FakeDocker::run_docker_exec`]
    /// so tests can assert on what would have been run in the container.
    pub(crate) struct RecordedExecCommand {
        // Underscore-prefixed fields are recorded for completeness but not
        // currently asserted on by any test.
        pub(crate) _container_id: String,
        pub(crate) _remote_folder: String,
        pub(crate) _user: String,
        /// Environment variables the exec was started with (asserted on).
        pub(crate) env: HashMap<String, String>,
        pub(crate) _inner_command: Command,
    }
4260
    /// In-memory stand-in for the Docker daemon: serves canned
    /// `inspect`/`ps`/compose responses and records `exec` calls instead of
    /// touching real containers.
    pub(crate) struct FakeDocker {
        /// Every `run_docker_exec` call, in invocation order.
        exec_commands_recorded: Mutex<Vec<RecordedExecCommand>>,
        /// When true the fake reports itself as podman (affects
        /// `docker_cli` and `supports_compose_buildkit`).
        podman: bool,
    }
4265
4266 impl FakeDocker {
4267 pub(crate) fn new() -> Self {
4268 Self {
4269 podman: false,
4270 exec_commands_recorded: Mutex::new(Vec::new()),
4271 }
4272 }
4273 #[cfg(not(target_os = "windows"))]
4274 fn set_podman(&mut self, podman: bool) {
4275 self.podman = podman;
4276 }
4277 }
4278
    #[async_trait]
    impl DockerClient for FakeDocker {
        /// Serves canned `docker inspect` payloads keyed by the exact image
        /// name or id prefix the tests are expected to query; any other id
        /// yields `DockerNotAvailable`.
        async fn inspect(&self, id: &String) -> Result<DockerInspect, DevContainerError> {
            // Base image used by the typescript/Dockerfile tests.
            if id == "mcr.microsoft.com/devcontainers/typescript-node:1-18-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc104"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        env: Vec::new(),
                        image_user: Some("root".to_string()),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Base image used by the rust tests; remoteUser is "vscode".
            if id == "mcr.microsoft.com/devcontainers/rust:2-1-bookworm" {
                return Ok(DockerInspect {
                    id: "sha256:39ad1c7264794d60e3bc449d9d8877a8e486d19ad8fba80f5369def6a2408392"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }
            // Built image/container for the "cli-..." test project; carries
            // PATH=/initial/path so `${containerEnv:PATH}` expansion in
            // remoteEnv can be asserted.
            if id.starts_with("cli_") {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: None,
                    state: None,
                });
            }
            // The container id returned by `find_process_by_filters`; the only
            // branch that reports mounts (local project -> /workspaces).
            if id == "found_docker_ps" {
                return Ok(DockerInspect {
                    id: "sha256:610e6cfca95280188b021774f8cf69dd6f49bdb6eebc34c5ee2010f4d51cc105"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("node".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: vec!["PATH=/initial/path".to_string()],
                    },
                    mounts: Some(vec![DockerInspectMount {
                        source: "/path/to/local/project".to_string(),
                        destination: "/workspaces/project".to_string(),
                    }]),
                    state: None,
                });
            }
            // Built image for the rust docker-compose project.
            if id.starts_with("rust_a-") {
                return Ok(DockerInspect {
                    id: "sha256:9da65c34ab809e763b13d238fd7a0f129fcabd533627d340f293308cb63620a0"
                        .to_string(),
                    config: DockerInspectConfig {
                        labels: DockerConfigLabels {
                            metadata: Some(vec![HashMap::from([(
                                "remoteUser".to_string(),
                                Value::String("vscode".to_string()),
                            )])]),
                        },
                        image_user: Some("root".to_string()),
                        env: Vec::new(),
                    },
                    mounts: None,
                    state: None,
                });
            }

            // Unknown id: behave as if docker isn't running at all.
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Returns the parsed compose model for the test project's
        /// docker-compose.yml — an `app` service built from a Dockerfile that
        /// shares `db`'s network, plus a postgres `db` service with a named
        /// volume. Any other file list is an error.
        async fn get_docker_compose_config(
            &self,
            config_files: &Vec<PathBuf>,
        ) -> Result<Option<DockerComposeConfig>, DevContainerError> {
            if config_files.len() == 1
                && config_files.get(0)
                    == Some(&PathBuf::from(
                        "/path/to/local/project/.devcontainer/docker-compose.yml",
                    ))
            {
                return Ok(Some(DockerComposeConfig {
                    name: None,
                    services: HashMap::from([
                        (
                            "app".to_string(),
                            DockerComposeService {
                                build: Some(DockerComposeServiceBuild {
                                    context: Some(".".to_string()),
                                    dockerfile: Some("Dockerfile".to_string()),
                                    args: None,
                                    additional_contexts: None,
                                }),
                                volumes: vec![MountDefinition {
                                    source: "../..".to_string(),
                                    target: "/workspaces".to_string(),
                                    mount_type: Some("bind".to_string()),
                                }],
                                network_mode: Some("service:db".to_string()),
                                ..Default::default()
                            },
                        ),
                        (
                            "db".to_string(),
                            DockerComposeService {
                                image: Some("postgres:14.1".to_string()),
                                volumes: vec![MountDefinition {
                                    source: "postgres-data".to_string(),
                                    target: "/var/lib/postgresql/data".to_string(),
                                    mount_type: Some("volume".to_string()),
                                }],
                                env_file: Some(vec![".env".to_string()]),
                                ..Default::default()
                            },
                        ),
                    ]),
                    volumes: HashMap::from([(
                        "postgres-data".to_string(),
                        DockerComposeVolume::default(),
                    )]),
                }));
            }
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Pretends every compose build succeeds.
        async fn docker_compose_build(
            &self,
            _config_files: &Vec<PathBuf>,
            _project_name: &str,
        ) -> Result<(), DevContainerError> {
            Ok(())
        }
        /// Records the exec instead of running it; always succeeds.
        async fn run_docker_exec(
            &self,
            container_id: &str,
            remote_folder: &str,
            user: &str,
            env: &HashMap<String, String>,
            inner_command: Command,
        ) -> Result<(), DevContainerError> {
            let mut record = self
                .exec_commands_recorded
                .lock()
                .expect("should be available");
            record.push(RecordedExecCommand {
                _container_id: container_id.to_string(),
                _remote_folder: remote_folder.to_string(),
                _user: user.to_string(),
                env: env.clone(),
                _inner_command: inner_command,
            });
            Ok(())
        }
        /// Starting containers is unsupported in the fake.
        async fn start_container(&self, _id: &str) -> Result<(), DevContainerError> {
            Err(DevContainerError::DockerNotAvailable)
        }
        /// Always "finds" a container whose id (`found_docker_ps`) the
        /// `inspect` method above recognizes.
        async fn find_process_by_filters(
            &self,
            _filters: Vec<String>,
        ) -> Result<Option<DockerPs>, DevContainerError> {
            Ok(Some(DockerPs {
                id: "found_docker_ps".to_string(),
            }))
        }
        /// BuildKit is reported for docker but not for podman.
        fn supports_compose_buildkit(&self) -> bool {
            !self.podman
        }
        /// CLI binary name matching the configured engine.
        fn docker_cli(&self) -> String {
            if self.podman {
                "podman".to_string()
            } else {
                "docker".to_string()
            }
        }
    }
4481
    /// Program name and arguments of one command that was routed through
    /// [`TestCommandRunner`] instead of being spawned.
    #[derive(Debug, Clone)]
    pub(crate) struct TestCommand {
        pub(crate) program: String,
        pub(crate) args: Vec<String>,
    }
4487
    /// `CommandRunner` test double that logs every command it is asked to run
    /// and reports success without executing anything.
    pub(crate) struct TestCommandRunner {
        /// All commands seen so far, in invocation order.
        commands_recorded: Mutex<Vec<TestCommand>>,
    }
4491
4492 impl TestCommandRunner {
4493 fn new() -> Self {
4494 Self {
4495 commands_recorded: Mutex::new(Vec::new()),
4496 }
4497 }
4498
4499 fn commands_by_program(&self, program: &str) -> Vec<TestCommand> {
4500 let record = self.commands_recorded.lock().expect("poisoned");
4501 record
4502 .iter()
4503 .filter(|r| r.program == program)
4504 .map(|r| r.clone())
4505 .collect()
4506 }
4507 }
4508
4509 #[async_trait]
4510 impl CommandRunner for TestCommandRunner {
4511 async fn run_command(&self, command: &mut Command) -> Result<Output, std::io::Error> {
4512 let mut record = self.commands_recorded.lock().expect("poisoned");
4513
4514 record.push(TestCommand {
4515 program: command.get_program().display().to_string(),
4516 args: command
4517 .get_args()
4518 .map(|a| a.display().to_string())
4519 .collect(),
4520 });
4521
4522 Ok(Output {
4523 status: ExitStatus::default(),
4524 stdout: vec![],
4525 stderr: vec![],
4526 })
4527 }
4528 }
4529
4530 fn fake_http_client() -> Arc<dyn HttpClient> {
4531 FakeHttpClient::create(|request| async move {
4532 let (parts, _body) = request.into_parts();
4533 if parts.uri.path() == "/token" {
4534 let token_response = TokenResponse {
4535 token: "token".to_string(),
4536 };
4537 return Ok(http::Response::builder()
4538 .status(200)
4539 .body(http_client::AsyncBody::from(
4540 serde_json_lenient::to_string(&token_response).unwrap(),
4541 ))
4542 .unwrap());
4543 }
4544
4545 // OCI specific things
4546 if parts.uri.path() == "/v2/devcontainers/features/docker-in-docker/manifests/2" {
4547 let response = r#"
4548 {
4549 "schemaVersion": 2,
4550 "mediaType": "application/vnd.oci.image.manifest.v1+json",
4551 "config": {
4552 "mediaType": "application/vnd.devcontainers",
4553 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
4554 "size": 2
4555 },
4556 "layers": [
4557 {
4558 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
4559 "digest": "sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5",
4560 "size": 59392,
4561 "annotations": {
4562 "org.opencontainers.image.title": "devcontainer-feature-docker-in-docker.tgz"
4563 }
4564 }
4565 ],
4566 "annotations": {
4567 "dev.containers.metadata": "{\"id\":\"docker-in-docker\",\"version\":\"2.16.1\",\"name\":\"Docker (Docker-in-Docker)\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/docker-in-docker\",\"description\":\"Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"20.10\"],\"default\":\"latest\",\"description\":\"Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)\"},\"moby\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install OSS Moby build instead of Docker CE\"},\"mobyBuildxVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Install a specific version of moby-buildx when using Moby\"},\"dockerDashComposeVersion\":{\"type\":\"string\",\"enum\":[\"none\",\"v1\",\"v2\"],\"default\":\"v2\",\"description\":\"Default version of Docker Compose (v1, v2 or none)\"},\"azureDnsAutoDetection\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure\"},\"dockerDefaultAddressPool\":{\"type\":\"string\",\"default\":\"\",\"proposals\":[],\"description\":\"Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24\"},\"installDockerBuildx\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Install Docker Buildx\"},\"installDockerComposeSwitch\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. 
It translates the command line into Compose V2 docker compose then runs the latter.\"},\"disableIp6tables\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Disable ip6tables (this option is only applicable for Docker versions 27 and greater)\"}},\"entrypoint\":\"/usr/local/share/docker-init.sh\",\"privileged\":true,\"containerEnv\":{\"DOCKER_BUILDKIT\":\"1\"},\"customizations\":{\"vscode\":{\"extensions\":[\"ms-azuretools.vscode-containers\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container.\"}]}}},\"mounts\":[{\"source\":\"dind-var-lib-docker-${devcontainerId}\",\"target\":\"/var/lib/docker\",\"type\":\"volume\"}],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
4568 "com.github.package.type": "devcontainer_feature"
4569 }
4570 }
4571 "#;
4572 return Ok(http::Response::builder()
4573 .status(200)
4574 .body(http_client::AsyncBody::from(response))
4575 .unwrap());
4576 }
4577
4578 if parts.uri.path()
4579 == "/v2/devcontainers/features/docker-in-docker/blobs/sha256:bc7ab0d8d8339416e1491419ab9ffe931458d0130110f4b18351b0fa184e67d5"
4580 {
4581 let response = build_tarball(vec that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4586 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4587 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4588 ```
4589 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4590 ```
4591 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4592
4593
4594 ## OS Support
4595
4596 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4597
4598 Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04).
4599
4600 `bash` is required to execute the `install.sh` script."#),
4601 ("./README.md", r#"
4602 # Docker (Docker-in-Docker) (docker-in-docker)
4603
4604 Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.
4605
4606 ## Example Usage
4607
4608 ```json
4609 "features": {
4610 "ghcr.io/devcontainers/features/docker-in-docker:2": {}
4611 }
4612 ```
4613
4614 ## Options
4615
4616 | Options Id | Description | Type | Default Value |
4617 |-----|-----|-----|-----|
4618 | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest |
4619 | moby | Install OSS Moby build instead of Docker CE | boolean | true |
4620 | mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest |
4621 | dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 |
4622 | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true |
4623 | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - |
4624 | installDockerBuildx | Install Docker Buildx | boolean | true |
4625 | installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | false |
4626 | disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false |
4627
4628 ## Customizations
4629
4630 ### VS Code Extensions
4631
4632 - `ms-azuretools.vscode-containers`
4633
4634 ## Limitations
4635
4636 This docker-in-docker Dev Container Feature is roughly based on the [official docker-in-docker wrapper script](https://github.com/moby/moby/blob/master/hack/dind) that is part of the [Moby project](https://mobyproject.org/). With this in mind:
4637 * As the name implies, the Feature is expected to work when the host is running Docker (or the OSS Moby container engine it is built on). It may be possible to get running in other container engines, but it has not been tested with them.
4638 * The host and the container must be running on the same chip architecture. You will not be able to use it with an emulated x86 image with Docker Desktop on an Apple Silicon Mac, like in this example:
4639 ```
4640 FROM --platform=linux/amd64 mcr.microsoft.com/devcontainers/typescript-node:16
4641 ```
4642 See [Issue #219](https://github.com/devcontainers/features/issues/219) for more details.
4643
4644
4645 ## OS Support
4646
4647 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed.
4648
4649 `bash` is required to execute the `install.sh` script.
4650
4651
4652 ---
4653
4654 _Note: This file was auto-generated from the [devcontainer-feature.json](https://github.com/devcontainers/features/blob/main/src/docker-in-docker/devcontainer-feature.json). Add additional notes to a `NOTES.md`._"#),
4655 ("./devcontainer-feature.json", r#"
4656 {
4657 "id": "docker-in-docker",
4658 "version": "2.16.1",
4659 "name": "Docker (Docker-in-Docker)",
4660 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker",
4661 "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.",
4662 "options": {
4663 "version": {
4664 "type": "string",
4665 "proposals": [
4666 "latest",
4667 "none",
4668 "20.10"
4669 ],
4670 "default": "latest",
4671 "description": "Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.)"
4672 },
4673 "moby": {
4674 "type": "boolean",
4675 "default": true,
4676 "description": "Install OSS Moby build instead of Docker CE"
4677 },
4678 "mobyBuildxVersion": {
4679 "type": "string",
4680 "default": "latest",
4681 "description": "Install a specific version of moby-buildx when using Moby"
4682 },
4683 "dockerDashComposeVersion": {
4684 "type": "string",
4685 "enum": [
4686 "none",
4687 "v1",
4688 "v2"
4689 ],
4690 "default": "v2",
4691 "description": "Default version of Docker Compose (v1, v2 or none)"
4692 },
4693 "azureDnsAutoDetection": {
4694 "type": "boolean",
4695 "default": true,
4696 "description": "Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure"
4697 },
4698 "dockerDefaultAddressPool": {
4699 "type": "string",
4700 "default": "",
4701 "proposals": [],
4702 "description": "Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24"
4703 },
4704 "installDockerBuildx": {
4705 "type": "boolean",
4706 "default": true,
4707 "description": "Install Docker Buildx"
4708 },
4709 "installDockerComposeSwitch": {
4710 "type": "boolean",
4711 "default": false,
4712 "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter."
4713 },
4714 "disableIp6tables": {
4715 "type": "boolean",
4716 "default": false,
4717 "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)"
4718 }
4719 },
4720 "entrypoint": "/usr/local/share/docker-init.sh",
4721 "privileged": true,
4722 "containerEnv": {
4723 "DOCKER_BUILDKIT": "1"
4724 },
4725 "customizations": {
4726 "vscode": {
4727 "extensions": [
4728 "ms-azuretools.vscode-containers"
4729 ],
4730 "settings": {
4731 "github.copilot.chat.codeGeneration.instructions": [
4732 {
4733 "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container."
4734 }
4735 ]
4736 }
4737 }
4738 },
4739 "mounts": [
4740 {
4741 "source": "dind-var-lib-docker-${devcontainerId}",
4742 "target": "/var/lib/docker",
4743 "type": "volume"
4744 }
4745 ],
4746 "installsAfter": [
4747 "ghcr.io/devcontainers/features/common-utils"
4748 ]
4749 }"#),
4750 ("./install.sh", r#"
4751 #!/usr/bin/env bash
4752 #-------------------------------------------------------------------------------------------------------------
4753 # Copyright (c) Microsoft Corporation. All rights reserved.
4754 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
4755 #-------------------------------------------------------------------------------------------------------------
4756 #
4757 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md
4758 # Maintainer: The Dev Container spec maintainers
4759
4760
4761 DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version
4762 USE_MOBY="${MOBY:-"true"}"
4763 MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}"
4764 DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none
4765 AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}"
4766 DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}"
4767 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
4768 INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}"
4769 INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"false"}"
4770 MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc"
4771 MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc"
4772 DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble"
4773 DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble"
4774 DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}"
4775
4776 # Default: Exit on any failure.
4777 set -e
4778
4779 # Clean up
4780 rm -rf /var/lib/apt/lists/*
4781
4782 # Setup STDERR.
4783 err() {
4784 echo "(!) $*" >&2
4785 }
4786
4787 if [ "$(id -u)" -ne 0 ]; then
4788 err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
4789 exit 1
4790 fi
4791
4792 ###################
4793 # Helper Functions
4794 # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh
4795 ###################
4796
4797 # Determine the appropriate non-root user
4798 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
4799 USERNAME=""
4800 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
4801 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
4802 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
4803 USERNAME=${CURRENT_USER}
4804 break
4805 fi
4806 done
4807 if [ "${USERNAME}" = "" ]; then
4808 USERNAME=root
4809 fi
4810 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
4811 USERNAME=root
4812 fi
4813
4814 # Package manager update function
4815 pkg_mgr_update() {
4816 case ${ADJUSTED_ID} in
4817 debian)
4818 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
4819 echo "Running apt-get update..."
4820 apt-get update -y
4821 fi
4822 ;;
4823 rhel)
4824 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
4825 cache_check_dir="/var/cache/yum"
4826 else
4827 cache_check_dir="/var/cache/${PKG_MGR_CMD}"
4828 fi
4829 if [ "$(ls ${cache_check_dir}/* 2>/dev/null | wc -l)" = 0 ]; then
4830 echo "Running ${PKG_MGR_CMD} makecache ..."
4831 ${PKG_MGR_CMD} makecache
4832 fi
4833 ;;
4834 esac
4835 }
4836
4837 # Checks if packages are installed and installs them if not
4838 check_packages() {
4839 case ${ADJUSTED_ID} in
4840 debian)
4841 if ! dpkg -s "$@" > /dev/null 2>&1; then
4842 pkg_mgr_update
4843 apt-get -y install --no-install-recommends "$@"
4844 fi
4845 ;;
4846 rhel)
4847 if ! rpm -q "$@" > /dev/null 2>&1; then
4848 pkg_mgr_update
4849 ${PKG_MGR_CMD} -y install "$@"
4850 fi
4851 ;;
4852 esac
4853 }
4854
4855 # Figure out correct version of a three part version number is not passed
4856 find_version_from_git_tags() {
4857 local variable_name=$1
4858 local requested_version=${!variable_name}
4859 if [ "${requested_version}" = "none" ]; then return; fi
4860 local repository=$2
4861 local prefix=${3:-"tags/v"}
4862 local separator=${4:-"."}
4863 local last_part_optional=${5:-"false"}
4864 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
4865 local escaped_separator=${separator//./\\.}
4866 local last_part
4867 if [ "${last_part_optional}" = "true" ]; then
4868 last_part="(${escaped_separator}[0-9]+)?"
4869 else
4870 last_part="${escaped_separator}[0-9]+"
4871 fi
4872 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
4873 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
4874 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
4875 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
4876 else
4877 set +e
4878 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
4879 set -e
4880 fi
4881 fi
4882 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
4883 err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
4884 exit 1
4885 fi
4886 echo "${variable_name}=${!variable_name}"
4887 }
4888
4889 # Use semver logic to decrement a version number then look for the closest match
4890 find_prev_version_from_git_tags() {
4891 local variable_name=$1
4892 local current_version=${!variable_name}
4893 local repository=$2
4894 # Normally a "v" is used before the version number, but support alternate cases
4895 local prefix=${3:-"tags/v"}
4896 # Some repositories use "_" instead of "." for version number part separation, support that
4897 local separator=${4:-"."}
4898 # Some tools release versions that omit the last digit (e.g. go)
4899 local last_part_optional=${5:-"false"}
4900 # Some repositories may have tags that include a suffix (e.g. actions/node-versions)
4901 local version_suffix_regex=$6
4902 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
4903 set +e
4904 major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')"
4905 minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
4906 breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
4907
4908 if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then
4909 ((major=major-1))
4910 declare -g ${variable_name}="${major}"
4911 # Look for latest version from previous major release
4912 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4913 # Handle situations like Go's odd version pattern where "0" releases omit the last part
4914 elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
4915 ((minor=minor-1))
4916 declare -g ${variable_name}="${major}.${minor}"
4917 # Look for latest version from previous minor release
4918 find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}"
4919 else
4920 ((breakfix=breakfix-1))
4921 if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then
4922 declare -g ${variable_name}="${major}.${minor}"
4923 else
4924 declare -g ${variable_name}="${major}.${minor}.${breakfix}"
4925 fi
4926 fi
4927 set -e
4928 }
4929
4930 # Function to fetch the version released prior to the latest version
4931 get_previous_version() {
4932 local url=$1
4933 local repo_url=$2
4934 local variable_name=$3
4935 prev_version=${!variable_name}
4936
4937 output=$(curl -s "$repo_url");
4938 if echo "$output" | jq -e 'type == "object"' > /dev/null; then
4939 message=$(echo "$output" | jq -r '.message')
4940
4941 if [[ $message == "API rate limit exceeded"* ]]; then
4942 echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}"
4943 echo -e "\nAttempting to find latest version using GitHub tags."
4944 find_prev_version_from_git_tags prev_version "$url" "tags/v"
4945 declare -g ${variable_name}="${prev_version}"
4946 fi
4947 elif echo "$output" | jq -e 'type == "array"' > /dev/null; then
4948 echo -e "\nAttempting to find latest version using GitHub Api."
4949 version=$(echo "$output" | jq -r '.[1].tag_name')
4950 declare -g ${variable_name}="${version#v}"
4951 fi
4952 echo "${variable_name}=${!variable_name}"
4953 }
4954
4955 get_github_api_repo_url() {
4956 local url=$1
4957 echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases"
4958 }
4959
4960 ###########################################
4961 # Start docker-in-docker installation
4962 ###########################################
4963
4964 # Ensure apt is in non-interactive to avoid prompts
4965 export DEBIAN_FRONTEND=noninteractive
4966
4967 # Source /etc/os-release to get OS info
4968 . /etc/os-release
4969
4970 # Determine adjusted ID and package manager
4971 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
4972 ADJUSTED_ID="debian"
4973 PKG_MGR_CMD="apt-get"
4974 # Use dpkg for Debian-based systems
4975 architecture="$(dpkg --print-architecture 2>/dev/null || uname -m)"
4976 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"azurelinux"* || "${ID_LIKE}" = *"mariner"* ]]; then
4977 ADJUSTED_ID="rhel"
4978 # Determine the appropriate package manager for RHEL-based systems
4979 for pkg_mgr in tdnf dnf microdnf yum; do
4980 if command -v "$pkg_mgr" >/dev/null 2>&1; then
4981 PKG_MGR_CMD="$pkg_mgr"
4982 break
4983 fi
4984 done
4985
4986 if [ -z "${PKG_MGR_CMD}" ]; then
4987 err "Unable to find a supported package manager (tdnf, dnf, microdnf, yum)"
4988 exit 1
4989 fi
4990
4991 architecture="$(rpm --eval '%{_arch}' 2>/dev/null || uname -m)"
4992 else
4993 err "Linux distro ${ID} not supported."
4994 exit 1
4995 fi
4996
4997 # Azure Linux specific setup
4998 if [ "${ID}" = "azurelinux" ]; then
4999 VERSION_CODENAME="azurelinux${VERSION_ID}"
5000 fi
5001
5002 # Prevent attempting to install Moby on Debian trixie (packages removed)
5003 if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then
5004 err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution."
5005 err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')."
5006 exit 1
5007 fi
5008
5009 # Check if distro is supported
5010 if [ "${USE_MOBY}" = "true" ]; then
5011 if [ "${ADJUSTED_ID}" = "debian" ]; then
5012 if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5013 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution"
5014 err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}"
5015 exit 1
5016 fi
5017 echo "(*) ${VERSION_CODENAME} is supported for Moby installation - setting up Microsoft repository"
5018 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5019 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5020 echo " (*) ${ID} ${VERSION_ID} detected - using Microsoft repositories for Moby packages"
5021 else
5022 echo "RHEL-based system (${ID}) detected - Moby packages may require additional configuration"
5023 fi
5024 fi
5025 else
5026 if [ "${ADJUSTED_ID}" = "debian" ]; then
5027 if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then
5028 err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution"
5029 err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}"
5030 exit 1
5031 fi
5032 echo "(*) ${VERSION_CODENAME} is supported for Docker CE installation (supported: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}) - setting up Docker repository"
5033 elif [ "${ADJUSTED_ID}" = "rhel" ]; then
5034
5035 echo "RHEL-based system (${ID}) detected - using Docker CE packages"
5036 fi
5037 fi
5038
5039 # Install base dependencies
5040 base_packages="curl ca-certificates pigz iptables gnupg2 wget jq"
5041 case ${ADJUSTED_ID} in
5042 debian)
5043 check_packages apt-transport-https $base_packages dirmngr
5044 ;;
5045 rhel)
5046 check_packages $base_packages tar gawk shadow-utils policycoreutils procps-ng systemd-libs systemd-devel
5047
5048 ;;
5049 esac
5050
5051 # Install git if not already present
5052 if ! command -v git >/dev/null 2>&1; then
5053 check_packages git
5054 fi
5055
5056 # Update CA certificates to ensure HTTPS connections work properly
5057 # This is especially important for Ubuntu 24.04 (Noble) and Debian Trixie
5058 # Only run for Debian-based systems (RHEL uses update-ca-trust instead)
5059 if [ "${ADJUSTED_ID}" = "debian" ] && command -v update-ca-certificates > /dev/null 2>&1; then
5060 update-ca-certificates
5061 fi
5062
5063 # Swap to legacy iptables for compatibility (Debian only)
5064 if [ "${ADJUSTED_ID}" = "debian" ] && type iptables-legacy > /dev/null 2>&1; then
5065 update-alternatives --set iptables /usr/sbin/iptables-legacy
5066 update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy
5067 fi
5068
5069 # Set up the necessary repositories
5070 if [ "${USE_MOBY}" = "true" ]; then
5071 # Name of open source engine/cli
5072 engine_package_name="moby-engine"
5073 cli_package_name="moby-cli"
5074
5075 case ${ADJUSTED_ID} in
5076 debian)
5077 # Import key safely and import Microsoft apt repo
5078 {
5079 curl -sSL ${MICROSOFT_GPG_KEYS_URI}
5080 curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI}
5081 } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg
5082 echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list
5083 ;;
5084 rhel)
5085 echo "(*) ${ID} detected - checking for Moby packages..."
5086
5087 # Check if moby packages are available in default repos
5088 if ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5089 echo "(*) Using built-in ${ID} Moby packages"
5090 else
5091 case "${ID}" in
5092 azurelinux)
5093 echo "(*) Moby packages not found in Azure Linux repositories"
5094 echo "(*) For Azure Linux, Docker CE ('moby': false) is recommended"
5095 err "Moby packages are not available for Azure Linux ${VERSION_ID}."
5096 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5097 exit 1
5098 ;;
5099 mariner)
5100 echo "(*) Adding Microsoft repository for CBL-Mariner..."
5101 # Add Microsoft repository if packages aren't available locally
5102 curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /etc/pki/rpm-gpg/microsoft.gpg
5103 cat > /etc/yum.repos.d/microsoft.repo << EOF
5104 [microsoft]
5105 name=Microsoft Repository
5106 baseurl=https://packages.microsoft.com/repos/microsoft-cbl-mariner-2.0-prod-base/
5107 enabled=1
5108 gpgcheck=1
5109 gpgkey=file:///etc/pki/rpm-gpg/microsoft.gpg
5110 EOF
5111 # Verify packages are available after adding repo
5112 pkg_mgr_update
5113 if ! ${PKG_MGR_CMD} list available moby-engine >/dev/null 2>&1; then
5114 echo "(*) Moby packages not found in Microsoft repository either"
5115 err "Moby packages are not available for CBL-Mariner ${VERSION_ID}."
5116 err "Recommendation: Use '\"moby\": false' to install Docker CE instead."
5117 exit 1
5118 fi
5119 ;;
5120 *)
5121 err "Moby packages are not available for ${ID}. Please use 'moby': false option."
5122 exit 1
5123 ;;
5124 esac
5125 fi
5126 ;;
5127 esac
5128 else
5129 # Name of licensed engine/cli
5130 engine_package_name="docker-ce"
5131 cli_package_name="docker-ce-cli"
5132 case ${ADJUSTED_ID} in
5133 debian)
5134 curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg
5135 echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list
5136 ;;
5137 rhel)
5138 # Docker CE repository setup for RHEL-based systems
5139 setup_docker_ce_repo() {
5140 curl -fsSL https://download.docker.com/linux/centos/gpg > /etc/pki/rpm-gpg/docker-ce.gpg
5141 cat > /etc/yum.repos.d/docker-ce.repo << EOF
5142 [docker-ce-stable]
5143 name=Docker CE Stable
5144 baseurl=https://download.docker.com/linux/centos/9/\$basearch/stable
5145 enabled=1
5146 gpgcheck=1
5147 gpgkey=file:///etc/pki/rpm-gpg/docker-ce.gpg
5148 skip_if_unavailable=1
5149 module_hotfixes=1
5150 EOF
5151 }
5152 install_azure_linux_deps() {
5153 echo "(*) Installing device-mapper libraries for Docker CE..."
5154 [ "${ID}" != "mariner" ] && ${PKG_MGR_CMD} -y install device-mapper-libs 2>/dev/null || echo "(*) Device-mapper install failed, proceeding"
5155 echo "(*) Installing additional Docker CE dependencies..."
5156 ${PKG_MGR_CMD} -y install libseccomp libtool-ltdl systemd-libs libcgroup tar xz || {
5157 echo "(*) Some optional dependencies could not be installed, continuing..."
5158 }
5159 }
5160 setup_selinux_context() {
5161 if command -v getenforce >/dev/null 2>&1 && [ "$(getenforce 2>/dev/null)" != "Disabled" ]; then
5162 echo "(*) Creating minimal SELinux context for Docker compatibility..."
5163 mkdir -p /etc/selinux/targeted/contexts/files/ 2>/dev/null || true
5164 echo "/var/lib/docker(/.*)? system_u:object_r:container_file_t:s0" >> /etc/selinux/targeted/contexts/files/file_contexts.local 2>/dev/null || true
5165 fi
5166 }
5167
5168 # Special handling for RHEL Docker CE installation
5169 case "${ID}" in
5170 azurelinux|mariner)
5171 echo "(*) ${ID} detected"
5172 echo "(*) Note: Moby packages work better on Azure Linux. Consider using 'moby': true"
5173 echo "(*) Setting up Docker CE repository..."
5174
5175 setup_docker_ce_repo
5176 install_azure_linux_deps
5177
5178 if [ "${USE_MOBY}" != "true" ]; then
5179 echo "(*) Docker CE installation for Azure Linux - skipping container-selinux"
5180 echo "(*) Note: SELinux policies will be minimal but Docker will function normally"
5181 setup_selinux_context
5182 else
5183 echo "(*) Using Moby - container-selinux not required"
5184 fi
5185 ;;
5186 *)
5187 # Standard RHEL/CentOS/Fedora approach
5188 if command -v dnf >/dev/null 2>&1; then
5189 dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5190 elif command -v yum-config-manager >/dev/null 2>&1; then
5191 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
5192 else
5193 # Manual fallback
5194 setup_docker_ce_repo
5195 fi
5196 ;;
5197 esac
5198 ;;
5199 esac
5200 fi
5201
5202 # Refresh package database
5203 case ${ADJUSTED_ID} in
5204 debian)
5205 apt-get update
5206 ;;
5207 rhel)
5208 pkg_mgr_update
5209 ;;
5210 esac
5211
5212 # Soft version matching
5213 if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then
5214 # Empty, meaning grab whatever "latest" is in apt repo
5215 engine_version_suffix=""
5216 cli_version_suffix=""
5217 else
5218 case ${ADJUSTED_ID} in
5219 debian)
5220 # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...)
5221 docker_version_dot_escaped="${DOCKER_VERSION//./\\.}"
5222 docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}"
5223 # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/
5224 docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5225 set +e # Don't exit if finding version fails - will handle gracefully
5226 cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5227 engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")"
5228 set -e
5229 if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then
5230 err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5231 apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5232 exit 1
5233 fi
5234 ;;
5235 rhel)
5236 # For RHEL-based systems, use dnf/yum to find versions
5237 docker_version_escaped="${DOCKER_VERSION//./\\.}"
5238 set +e # Don't exit if finding version fails - will handle gracefully
5239 if [ "${USE_MOBY}" = "true" ]; then
5240 available_versions=$(${PKG_MGR_CMD} list --available moby-engine 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5241 else
5242 available_versions=$(${PKG_MGR_CMD} list --available docker-ce 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${docker_version_escaped}" | head -1)
5243 fi
5244 set -e
5245 if [ -n "${available_versions}" ]; then
5246 engine_version_suffix="-${available_versions}"
5247 cli_version_suffix="-${available_versions}"
5248 else
5249 echo "(*) Exact version ${DOCKER_VERSION} not found, using latest available"
5250 engine_version_suffix=""
5251 cli_version_suffix=""
5252 fi
5253 ;;
5254 esac
5255 fi
5256
5257 # Version matching for moby-buildx
5258 if [ "${USE_MOBY}" = "true" ]; then
5259 if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then
5260 # Empty, meaning grab whatever "latest" is in apt repo
5261 buildx_version_suffix=""
5262 else
5263 case ${ADJUSTED_ID} in
5264 debian)
5265 buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5266 buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}"
5267 buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)"
5268 set +e
5269 buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")"
5270 set -e
5271 if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then
5272 err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:"
5273 apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+'
5274 exit 1
5275 fi
5276 ;;
5277 rhel)
5278 # For RHEL-based systems, try to find buildx version or use latest
5279 buildx_version_escaped="${MOBY_BUILDX_VERSION//./\\.}"
5280 set +e
5281 available_buildx=$(${PKG_MGR_CMD} list --available moby-buildx 2>/dev/null | grep -v "Available Packages" | awk '{print $2}' | grep -E "^${buildx_version_escaped}" | head -1)
5282 set -e
5283 if [ -n "${available_buildx}" ]; then
5284 buildx_version_suffix="-${available_buildx}"
5285 else
5286 echo "(*) Exact buildx version ${MOBY_BUILDX_VERSION} not found, using latest available"
5287 buildx_version_suffix=""
5288 fi
5289 ;;
5290 esac
5291 echo "buildx_version_suffix ${buildx_version_suffix}"
5292 fi
5293 fi
5294
5295 # Install Docker / Moby CLI if not already installed
5296 if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then
5297 echo "Docker / Moby CLI and Engine already installed."
5298 else
5299 case ${ADJUSTED_ID} in
5300 debian)
5301 if [ "${USE_MOBY}" = "true" ]; then
5302 # Install engine
5303 set +e # Handle error gracefully
5304 apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix}
5305 exit_code=$?
5306 set -e
5307
5308 if [ ${exit_code} -ne 0 ]; then
5309 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')."
5310 exit 1
5311 fi
5312
5313 # Install compose
5314 apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5315 else
5316 apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix}
5317 # Install compose
5318 apt-mark hold docker-ce docker-ce-cli
5319 apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5320 fi
5321 ;;
5322 rhel)
5323 if [ "${USE_MOBY}" = "true" ]; then
5324 set +e # Handle error gracefully
5325 ${PKG_MGR_CMD} -y install moby-cli${cli_version_suffix} moby-engine${engine_version_suffix}
5326 exit_code=$?
5327 set -e
5328
5329 if [ ${exit_code} -ne 0 ]; then
5330 err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version."
5331 exit 1
5332 fi
5333
5334 # Install compose
5335 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5336 ${PKG_MGR_CMD} -y install moby-compose || echo "(*) Package moby-compose not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5337 fi
5338 else
5339 # Special handling for Azure Linux Docker CE installation
5340 if [ "${ID}" = "azurelinux" ] || [ "${ID}" = "mariner" ]; then
5341 echo "(*) Installing Docker CE on Azure Linux (bypassing container-selinux dependency)..."
5342
5343 # Use rpm with --force and --nodeps for Azure Linux
5344 set +e # Don't exit on error for this section
5345 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5346 install_result=$?
5347 set -e
5348
5349 if [ $install_result -ne 0 ]; then
5350 echo "(*) Standard installation failed, trying manual installation..."
5351
5352 echo "(*) Standard installation failed, trying manual installation..."
5353
5354 # Create directory for downloading packages
5355 mkdir -p /tmp/docker-ce-install
5356
5357 # Download packages manually using curl since tdnf doesn't support download
5358 echo "(*) Downloading Docker CE packages manually..."
5359
5360 # Get the repository baseurl
5361 repo_baseurl="https://download.docker.com/linux/centos/9/x86_64/stable"
5362
5363 # Download packages directly
5364 cd /tmp/docker-ce-install
5365
5366 # Get package names with versions
5367 if [ -n "${cli_version_suffix}" ]; then
5368 docker_ce_version="${cli_version_suffix#-}"
5369 docker_cli_version="${engine_version_suffix#-}"
5370 else
5371 # Get latest version from repository
5372 docker_ce_version="latest"
5373 fi
5374
5375 echo "(*) Attempting to download Docker CE packages from repository..."
5376
5377 # Try to download latest packages if specific version fails
5378 if ! curl -fsSL "${repo_baseurl}/Packages/docker-ce-${docker_ce_version}.el9.x86_64.rpm" -o docker-ce.rpm 2>/dev/null; then
5379 # Fallback: try to get latest available version
5380 echo "(*) Specific version not found, trying latest..."
5381 latest_docker=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5382 latest_cli=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'docker-ce-cli-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5383 latest_containerd=$(curl -s "${repo_baseurl}/Packages/" | grep -o 'containerd\.io-[0-9][^"]*\.el9\.x86_64\.rpm' | head -1)
5384
5385 if [ -n "${latest_docker}" ]; then
5386 curl -fsSL "${repo_baseurl}/Packages/${latest_docker}" -o docker-ce.rpm
5387 curl -fsSL "${repo_baseurl}/Packages/${latest_cli}" -o docker-ce-cli.rpm
5388 curl -fsSL "${repo_baseurl}/Packages/${latest_containerd}" -o containerd.io.rpm
5389 else
5390 echo "(*) ERROR: Could not find Docker CE packages in repository"
5391 echo "(*) Please check repository configuration or use 'moby': true"
5392 exit 1
5393 fi
5394 fi
5395 # Install systemd libraries required by Docker CE
5396 echo "(*) Installing systemd libraries required by Docker CE..."
5397 ${PKG_MGR_CMD} -y install systemd-libs || ${PKG_MGR_CMD} -y install systemd-devel || {
5398 echo "(*) WARNING: Could not install systemd libraries"
5399 echo "(*) Docker may fail to start without these"
5400 }
5401
5402 # Install with rpm --force --nodeps
5403 echo "(*) Installing Docker CE packages with dependency override..."
5404 rpm -Uvh --force --nodeps *.rpm
5405
5406 # Cleanup
5407 cd /
5408 rm -rf /tmp/docker-ce-install
5409
5410 echo "(*) Docker CE installation completed with dependency bypass"
5411 echo "(*) Note: Some SELinux functionality may be limited without container-selinux"
5412 fi
5413 else
5414 # Standard installation for other RHEL-based systems
5415 ${PKG_MGR_CMD} -y install docker-ce${cli_version_suffix} docker-ce-cli${engine_version_suffix} containerd.io
5416 fi
5417 # Install compose
5418 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5419 ${PKG_MGR_CMD} -y install docker-compose-plugin || echo "(*) Package docker-compose-plugin not available for ${ID} ${VERSION_CODENAME} (${architecture}). Skipping."
5420 fi
5421 fi
5422 ;;
5423 esac
5424 fi
5425
5426 echo "Finished installing docker / moby!"
5427
5428 docker_home="/usr/libexec/docker"
5429 cli_plugins_dir="${docker_home}/cli-plugins"
5430
5431 # fallback for docker-compose
5432 fallback_compose(){
5433 local url=$1
5434 local repo_url=$(get_github_api_repo_url "$url")
5435 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5436 get_previous_version "${url}" "${repo_url}" compose_version
5437 echo -e "\nAttempting to install v${compose_version}"
5438 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path}
5439 }
5440
5441 # If 'docker-compose' command is to be included
5442 if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then
5443 case "${architecture}" in
5444 amd64|x86_64) target_compose_arch=x86_64 ;;
5445 arm64|aarch64) target_compose_arch=aarch64 ;;
5446 *)
5447 echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
5448 exit 1
5449 esac
5450
5451 docker_compose_path="/usr/local/bin/docker-compose"
5452 if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then
5453 err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk."
5454 INSTALL_DOCKER_COMPOSE_SWITCH="false"
5455
5456 if [ "${target_compose_arch}" = "x86_64" ]; then
5457 echo "(*) Installing docker compose v1..."
5458 curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path}
5459 chmod +x ${docker_compose_path}
5460
5461 # Download the SHA256 checksum
5462 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')"
5463 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5464 sha256sum -c docker-compose.sha256sum --ignore-missing
5465 elif [ "${VERSION_CODENAME}" = "bookworm" ]; then
5466 err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2"
5467 exit 1
5468 else
5469 # Use pip to get a version that runs on this architecture
5470 check_packages python3-minimal python3-pip libffi-dev python3-venv
5471 echo "(*) Installing docker compose v1 via pip..."
5472 export PYTHONUSERBASE=/usr/local
5473 pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation
5474 fi
5475 else
5476 compose_version=${DOCKER_DASH_COMPOSE_VERSION#v}
5477 docker_compose_url="https://github.com/docker/compose"
5478 find_version_from_git_tags compose_version "$docker_compose_url" "tags/v"
5479 echo "(*) Installing docker-compose ${compose_version}..."
5480 curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || {
5481 echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..."
5482 fallback_compose "$docker_compose_url"
5483 }
5484
5485 chmod +x ${docker_compose_path}
5486
5487 # Download the SHA256 checksum
5488 DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')"
5489 echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum
5490 sha256sum -c docker-compose.sha256sum --ignore-missing
5491
5492 mkdir -p ${cli_plugins_dir}
5493 cp ${docker_compose_path} ${cli_plugins_dir}
5494 fi
5495 fi
5496
5497 # fallback method for compose-switch
5498 fallback_compose-switch() {
5499 local url=$1
5500 local repo_url=$(get_github_api_repo_url "$url")
5501 echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..."
5502 get_previous_version "$url" "$repo_url" compose_switch_version
5503 echo -e "\nAttempting to install v${compose_switch_version}"
5504 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch
5505 }
5506 # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation
5507 if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then
5508 if type docker-compose > /dev/null 2>&1; then
5509 echo "(*) Installing compose-switch..."
5510 current_compose_path="$(command -v docker-compose)"
5511 target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1"
5512 compose_switch_version="latest"
5513 compose_switch_url="https://github.com/docker/compose-switch"
5514 # Try to get latest version, fallback to known stable version if GitHub API fails
5515 set +e
5516 find_version_from_git_tags compose_switch_version "$compose_switch_url"
5517 if [ $? -ne 0 ] || [ -z "${compose_switch_version}" ] || [ "${compose_switch_version}" = "latest" ]; then
5518 echo "(*) GitHub API rate limited or failed, using fallback method"
5519 fallback_compose-switch "$compose_switch_url"
5520 fi
5521 set -e
5522
5523 # Map architecture for compose-switch downloads
5524 case "${architecture}" in
5525 amd64|x86_64) target_switch_arch=amd64 ;;
5526 arm64|aarch64) target_switch_arch=arm64 ;;
5527 *) target_switch_arch=${architecture} ;;
5528 esac
5529 curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${target_switch_arch}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url"
5530 chmod +x /usr/local/bin/compose-switch
5531 # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11
5532 # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2)
5533 mv "${current_compose_path}" "${target_compose_path}"
5534 update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99
5535 update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1
5536 else
5537 err "Skipping installation of compose-switch as docker compose is unavailable..."
5538 fi
5539 fi
5540
5541 # If init file already exists, exit
5542 if [ -f "/usr/local/share/docker-init.sh" ]; then
5543 echo "/usr/local/share/docker-init.sh already exists, so exiting."
5544 # Clean up
5545 rm -rf /var/lib/apt/lists/*
5546 exit 0
5547 fi
5548 echo "docker-init doesn't exist, adding..."
5549
5550 if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then
5551 groupadd -r docker
5552 fi
5553
5554 usermod -aG docker ${USERNAME}
5555
5556 # fallback for docker/buildx
5557 fallback_buildx() {
5558 local url=$1
5559 local repo_url=$(get_github_api_repo_url "$url")
5560 echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..."
5561 get_previous_version "$url" "$repo_url" buildx_version
5562 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5563 echo -e "\nAttempting to install v${buildx_version}"
5564 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}
5565 }
5566
5567 if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then
5568 buildx_version="latest"
5569 docker_buildx_url="https://github.com/docker/buildx"
5570 find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v"
5571 echo "(*) Installing buildx ${buildx_version}..."
5572
5573 # Map architecture for buildx downloads
5574 case "${architecture}" in
5575 amd64|x86_64) target_buildx_arch=amd64 ;;
5576 arm64|aarch64) target_buildx_arch=arm64 ;;
5577 *) target_buildx_arch=${architecture} ;;
5578 esac
5579
5580 buildx_file_name="buildx-v${buildx_version}.linux-${target_buildx_arch}"
5581
5582 cd /tmp
5583 wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url"
5584
5585 docker_home="/usr/libexec/docker"
5586 cli_plugins_dir="${docker_home}/cli-plugins"
5587
5588 mkdir -p ${cli_plugins_dir}
5589 mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx
5590 chmod +x ${cli_plugins_dir}/docker-buildx
5591
5592 chown -R "${USERNAME}:docker" "${docker_home}"
5593 chmod -R g+r+w "${docker_home}"
5594 find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s
5595 fi
5596
5597 DOCKER_DEFAULT_IP6_TABLES=""
5598 if [ "$DISABLE_IP6_TABLES" == true ]; then
5599 requested_version=""
5600 # checking whether the version requested either is in semver format or just a number denoting the major version
5601 # and, extracting the major version number out of the two scenarios
5602 semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$"
5603 if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then
5604 requested_version=$(echo $DOCKER_VERSION | cut -d. -f1)
5605 elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then
5606 requested_version=$DOCKER_VERSION
5607 fi
5608 if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then
5609 DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false"
5610 echo "(!) As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'"
5611 fi
5612 fi
5613
5614 if [ ! -d /usr/local/share ]; then
5615 mkdir -p /usr/local/share
5616 fi
5617
5618 tee /usr/local/share/docker-init.sh > /dev/null \
5619 << EOF
5620 #!/bin/sh
5621 #-------------------------------------------------------------------------------------------------------------
5622 # Copyright (c) Microsoft Corporation. All rights reserved.
5623 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
5624 #-------------------------------------------------------------------------------------------------------------
5625
5626 set -e
5627
5628 AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION}
5629 DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL}
5630 DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES}
5631 EOF
5632
5633 tee -a /usr/local/share/docker-init.sh > /dev/null \
5634 << 'EOF'
5635 dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF'
5636 # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly
5637 find /run /var/run -iname 'docker*.pid' -delete || :
5638 find /run /var/run -iname 'container*.pid' -delete || :
5639
5640 # -- Start: dind wrapper script --
5641 # Maintained: https://github.com/moby/moby/blob/master/hack/dind
5642
5643 export container=docker
5644
5645 if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
5646 mount -t securityfs none /sys/kernel/security || {
5647 echo >&2 'Could not mount /sys/kernel/security.'
5648 echo >&2 'AppArmor detection and --privileged mode might break.'
5649 }
5650 fi
5651
5652 # Mount /tmp (conditionally)
5653 if ! mountpoint -q /tmp; then
5654 mount -t tmpfs none /tmp
5655 fi
5656
5657 set_cgroup_nesting()
5658 {
5659 # cgroup v2: enable nesting
5660 if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
5661 # move the processes from the root group to the /init group,
5662 # otherwise writing subtree_control fails with EBUSY.
5663 # An error during moving non-existent process (i.e., "cat") is ignored.
5664 mkdir -p /sys/fs/cgroup/init
5665 xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
5666 # enable controllers
5667 sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
5668 > /sys/fs/cgroup/cgroup.subtree_control
5669 fi
5670 }
5671
5672 # Set cgroup nesting, retrying if necessary
5673 retry_cgroup_nesting=0
5674
5675 until [ "${retry_cgroup_nesting}" -eq "5" ];
5676 do
5677 set +e
5678 set_cgroup_nesting
5679
5680 if [ $? -ne 0 ]; then
5681 echo "(*) cgroup v2: Failed to enable nesting, retrying..."
5682 else
5683 break
5684 fi
5685
5686 retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1`
5687 set -e
5688 done
5689
5690 # -- End: dind wrapper script --
5691
5692 # Handle DNS
5693 set +e
5694 cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' > /dev/null 2>&1
5695 if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ]
5696 then
5697 echo "Setting dockerd Azure DNS."
5698 CUSTOMDNS="--dns 168.63.129.16"
5699 else
5700 echo "Not setting dockerd DNS manually."
5701 CUSTOMDNS=""
5702 fi
5703 set -e
5704
5705 if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ]
5706 then
5707 DEFAULT_ADDRESS_POOL=""
5708 else
5709 DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL"
5710 fi
5711
5712 # Start docker/moby engine
5713 ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) &
5714 INNEREOF
5715 )"
5716
5717 sudo_if() {
5718 COMMAND="$*"
5719
5720 if [ "$(id -u)" -ne 0 ]; then
5721 sudo $COMMAND
5722 else
5723 $COMMAND
5724 fi
5725 }
5726
5727 retry_docker_start_count=0
5728 docker_ok="false"
5729
5730 until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ];
5731 do
5732 # Start using sudo if not invoked as root
5733 if [ "$(id -u)" -ne 0 ]; then
5734 sudo /bin/sh -c "${dockerd_start}"
5735 else
5736 eval "${dockerd_start}"
5737 fi
5738
5739 retry_count=0
5740 until [ "${docker_ok}" = "true" ] || [ "${retry_count}" -eq "5" ];
5741 do
5742 sleep 1s
5743 set +e
5744 docker info > /dev/null 2>&1 && docker_ok="true"
5745 set -e
5746
5747 retry_count=`expr $retry_count + 1`
5748 done
5749
5750 if [ "${docker_ok}" != "true" ] && [ "${retry_docker_start_count}" != "4" ]; then
5751 echo "(*) Failed to start docker, retrying..."
5752 set +e
5753 sudo_if pkill dockerd
5754 sudo_if pkill containerd
5755 set -e
5756 fi
5757
5758 retry_docker_start_count=`expr $retry_docker_start_count + 1`
5759 done
5760
5761 # Execute whatever commands were passed in (if any). This allows us
5762 # to set this script to ENTRYPOINT while still executing the default CMD.
5763 exec "$@"
5764 EOF
5765
5766 chmod +x /usr/local/share/docker-init.sh
5767 chown ${USERNAME}:root /usr/local/share/docker-init.sh
5768
5769 # Clean up
5770 rm -rf /var/lib/apt/lists/*
5771
5772 echo 'docker-in-docker-debian script has completed!'"#),
5773 ]).await;
5774
5775 return Ok(http::Response::builder()
5776 .status(200)
5777 .body(AsyncBody::from(response))
5778 .unwrap());
5779 }
5780 if parts.uri.path() == "/v2/devcontainers/features/go/manifests/1" {
5781 let response = r#"
5782 {
5783 "schemaVersion": 2,
5784 "mediaType": "application/vnd.oci.image.manifest.v1+json",
5785 "config": {
5786 "mediaType": "application/vnd.devcontainers",
5787 "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
5788 "size": 2
5789 },
5790 "layers": [
5791 {
5792 "mediaType": "application/vnd.devcontainers.layer.v1+tar",
5793 "digest": "sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1",
5794 "size": 20992,
5795 "annotations": {
5796 "org.opencontainers.image.title": "devcontainer-feature-go.tgz"
5797 }
5798 }
5799 ],
5800 "annotations": {
5801 "dev.containers.metadata": "{\"id\":\"go\",\"version\":\"1.3.3\",\"name\":\"Go\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/go\",\"description\":\"Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\",\"none\",\"1.24\",\"1.23\"],\"default\":\"latest\",\"description\":\"Select or enter a Go version to install\"},\"golangciLintVersion\":{\"type\":\"string\",\"default\":\"latest\",\"description\":\"Version of golangci-lint to install\"}},\"init\":true,\"customizations\":{\"vscode\":{\"extensions\":[\"golang.Go\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development.\"}]}}},\"containerEnv\":{\"GOROOT\":\"/usr/local/go\",\"GOPATH\":\"/go\",\"PATH\":\"/usr/local/go/bin:/go/bin:${PATH}\"},\"capAdd\":[\"SYS_PTRACE\"],\"securityOpt\":[\"seccomp=unconfined\"],\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
5802 "com.github.package.type": "devcontainer_feature"
5803 }
5804 }
5805 "#;
5806
5807 return Ok(http::Response::builder()
5808 .status(200)
5809 .body(http_client::AsyncBody::from(response))
5810 .unwrap());
5811 }
5812 if parts.uri.path()
5813 == "/v2/devcontainers/features/go/blobs/sha256:eadd8a4757ee8ea6c1bc0aae22da49b7e5f2f1e32a87a5eac3cadeb7d2ccdad1"
5814 {
5815 let response = build_tarball(vec![
5816 ("./devcontainer-feature.json", r#"
5817 {
5818 "id": "go",
5819 "version": "1.3.3",
5820 "name": "Go",
5821 "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go",
5822 "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.",
5823 "options": {
5824 "version": {
5825 "type": "string",
5826 "proposals": [
5827 "latest",
5828 "none",
5829 "1.24",
5830 "1.23"
5831 ],
5832 "default": "latest",
5833 "description": "Select or enter a Go version to install"
5834 },
5835 "golangciLintVersion": {
5836 "type": "string",
5837 "default": "latest",
5838 "description": "Version of golangci-lint to install"
5839 }
5840 },
5841 "init": true,
5842 "customizations": {
5843 "vscode": {
5844 "extensions": [
5845 "golang.Go"
5846 ],
5847 "settings": {
5848 "github.copilot.chat.codeGeneration.instructions": [
5849 {
5850 "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development."
5851 }
5852 ]
5853 }
5854 }
5855 },
5856 "containerEnv": {
5857 "GOROOT": "/usr/local/go",
5858 "GOPATH": "/go",
5859 "PATH": "/usr/local/go/bin:/go/bin:${PATH}"
5860 },
5861 "capAdd": [
5862 "SYS_PTRACE"
5863 ],
5864 "securityOpt": [
5865 "seccomp=unconfined"
5866 ],
5867 "installsAfter": [
5868 "ghcr.io/devcontainers/features/common-utils"
5869 ]
5870 }
5871 "#),
5872 ("./install.sh", r#"
5873 #!/usr/bin/env bash
5874 #-------------------------------------------------------------------------------------------------------------
5875 # Copyright (c) Microsoft Corporation. All rights reserved.
5876 # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information
5877 #-------------------------------------------------------------------------------------------------------------
5878 #
5879 # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/go.md
5880 # Maintainer: The VS Code and Codespaces Teams
5881
5882 TARGET_GO_VERSION="${VERSION:-"latest"}"
5883 GOLANGCILINT_VERSION="${GOLANGCILINTVERSION:-"latest"}"
5884
5885 TARGET_GOROOT="${TARGET_GOROOT:-"/usr/local/go"}"
5886 TARGET_GOPATH="${TARGET_GOPATH:-"/go"}"
5887 USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}"
5888 INSTALL_GO_TOOLS="${INSTALL_GO_TOOLS:-"true"}"
5889
5890 # https://www.google.com/linuxrepositories/
5891 GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub"
5892
5893 set -e
5894
5895 if [ "$(id -u)" -ne 0 ]; then
5896 echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
5897 exit 1
5898 fi
5899
5900 # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME
5901 . /etc/os-release
5902 # Get an adjusted ID independent of distro variants
5903 MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1)
5904 if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then
5905 ADJUSTED_ID="debian"
5906 elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then
5907 ADJUSTED_ID="rhel"
5908 if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then
5909 VERSION_CODENAME="rhel${MAJOR_VERSION_ID}"
5910 else
5911 VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}"
5912 fi
5913 else
5914 echo "Linux distro ${ID} not supported."
5915 exit 1
5916 fi
5917
5918 if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then
5919 # As of 1 July 2024, mirrorlist.centos.org no longer exists.
5920 # Update the repo files to reference vault.centos.org.
5921 sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
5922 sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
5923 sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
5924 fi
5925
5926 # Setup INSTALL_CMD & PKG_MGR_CMD
5927 if type apt-get > /dev/null 2>&1; then
5928 PKG_MGR_CMD=apt-get
5929 INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends"
5930 elif type microdnf > /dev/null 2>&1; then
5931 PKG_MGR_CMD=microdnf
5932 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
5933 elif type dnf > /dev/null 2>&1; then
5934 PKG_MGR_CMD=dnf
5935 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0"
5936 else
5937 PKG_MGR_CMD=yum
5938 INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0"
5939 fi
5940
5941 # Clean up
5942 clean_up() {
5943 case ${ADJUSTED_ID} in
5944 debian)
5945 rm -rf /var/lib/apt/lists/*
5946 ;;
5947 rhel)
5948 rm -rf /var/cache/dnf/* /var/cache/yum/*
5949 rm -rf /tmp/yum.log
5950 rm -rf ${GPG_INSTALL_PATH}
5951 ;;
5952 esac
5953 }
5954 clean_up
5955
5956
5957 # Figure out correct version of a three part version number is not passed
5958 find_version_from_git_tags() {
5959 local variable_name=$1
5960 local requested_version=${!variable_name}
5961 if [ "${requested_version}" = "none" ]; then return; fi
5962 local repository=$2
5963 local prefix=${3:-"tags/v"}
5964 local separator=${4:-"."}
5965 local last_part_optional=${5:-"false"}
5966 if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then
5967 local escaped_separator=${separator//./\\.}
5968 local last_part
5969 if [ "${last_part_optional}" = "true" ]; then
5970 last_part="(${escaped_separator}[0-9]+)?"
5971 else
5972 last_part="${escaped_separator}[0-9]+"
5973 fi
5974 local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$"
5975 local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)"
5976 if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then
5977 declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)"
5978 else
5979 set +e
5980 declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")"
5981 set -e
5982 fi
5983 fi
5984 if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then
5985 echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2
5986 exit 1
5987 fi
5988 echo "${variable_name}=${!variable_name}"
5989 }
5990
5991 pkg_mgr_update() {
5992 case $ADJUSTED_ID in
5993 debian)
5994 if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
5995 echo "Running apt-get update..."
5996 ${PKG_MGR_CMD} update -y
5997 fi
5998 ;;
5999 rhel)
6000 if [ ${PKG_MGR_CMD} = "microdnf" ]; then
6001 if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then
6002 echo "Running ${PKG_MGR_CMD} makecache ..."
6003 ${PKG_MGR_CMD} makecache
6004 fi
6005 else
6006 if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then
6007 echo "Running ${PKG_MGR_CMD} check-update ..."
6008 set +e
6009 ${PKG_MGR_CMD} check-update
6010 rc=$?
6011 if [ $rc != 0 ] && [ $rc != 100 ]; then
6012 exit 1
6013 fi
6014 set -e
6015 fi
6016 fi
6017 ;;
6018 esac
6019 }
6020
6021 # Checks if packages are installed and installs them if not
6022 check_packages() {
6023 case ${ADJUSTED_ID} in
6024 debian)
6025 if ! dpkg -s "$@" > /dev/null 2>&1; then
6026 pkg_mgr_update
6027 ${INSTALL_CMD} "$@"
6028 fi
6029 ;;
6030 rhel)
6031 if ! rpm -q "$@" > /dev/null 2>&1; then
6032 pkg_mgr_update
6033 ${INSTALL_CMD} "$@"
6034 fi
6035 ;;
6036 esac
6037 }
6038
6039 # Ensure that login shells get the correct path if the user updated the PATH using ENV.
6040 rm -f /etc/profile.d/00-restore-env.sh
6041 echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh
6042 chmod +x /etc/profile.d/00-restore-env.sh
6043
6044 # Some distributions do not install awk by default (e.g. Mariner)
6045 if ! type awk >/dev/null 2>&1; then
6046 check_packages awk
6047 fi
6048
6049 # Determine the appropriate non-root user
6050 if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then
6051 USERNAME=""
6052 POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)")
6053 for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do
6054 if id -u ${CURRENT_USER} > /dev/null 2>&1; then
6055 USERNAME=${CURRENT_USER}
6056 break
6057 fi
6058 done
6059 if [ "${USERNAME}" = "" ]; then
6060 USERNAME=root
6061 fi
6062 elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then
6063 USERNAME=root
6064 fi
6065
6066 export DEBIAN_FRONTEND=noninteractive
6067
6068 check_packages ca-certificates gnupg2 tar gcc make pkg-config
6069
6070 if [ $ADJUSTED_ID = "debian" ]; then
6071 check_packages g++ libc6-dev
6072 else
6073 check_packages gcc-c++ glibc-devel
6074 fi
6075 # Install curl, git, other dependencies if missing
6076 if ! type curl > /dev/null 2>&1; then
6077 check_packages curl
6078 fi
6079 if ! type git > /dev/null 2>&1; then
6080 check_packages git
6081 fi
6082 # Some systems, e.g. Mariner, still a few more packages
6083 if ! type as > /dev/null 2>&1; then
6084 check_packages binutils
6085 fi
6086 if ! [ -f /usr/include/linux/errno.h ]; then
6087 check_packages kernel-headers
6088 fi
6089 # Minimal RHEL install may need findutils installed
6090 if ! [ -f /usr/bin/find ]; then
6091 check_packages findutils
6092 fi
6093
6094 # Get closest match for version number specified
6095 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6096
6097 architecture="$(uname -m)"
6098 case $architecture in
6099 x86_64) architecture="amd64";;
6100 aarch64 | armv8*) architecture="arm64";;
6101 aarch32 | armv7* | armvhf*) architecture="armv6l";;
6102 i?86) architecture="386";;
6103 *) echo "(!) Architecture $architecture unsupported"; exit 1 ;;
6104 esac
6105
6106 # Install Go
6107 umask 0002
6108 if ! cat /etc/group | grep -e "^golang:" > /dev/null 2>&1; then
6109 groupadd -r golang
6110 fi
6111 usermod -a -G golang "${USERNAME}"
6112 mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6113
6114 if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then
6115 # Use a temporary location for gpg keys to avoid polluting image
6116 export GNUPGHOME="/tmp/tmp-gnupg"
6117 mkdir -p ${GNUPGHOME}
6118 chmod 700 ${GNUPGHOME}
6119 curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}"
6120 gpg -q --import /tmp/tmp-gnupg/golang_key
6121 echo "Downloading Go ${TARGET_GO_VERSION}..."
6122 set +e
6123 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6124 exit_code=$?
6125 set -e
6126 if [ "$exit_code" != "0" ]; then
6127 echo "(!) Download failed."
6128 # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios.
6129 set +e
6130 major="$(echo "${TARGET_GO_VERSION}" | grep -oE '^[0-9]+' || echo '')"
6131 minor="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')"
6132 breakfix="$(echo "${TARGET_GO_VERSION}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')"
6133 # Handle Go's odd version pattern where "0" releases omit the last part
6134 if [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then
6135 ((minor=minor-1))
6136 TARGET_GO_VERSION="${major}.${minor}"
6137 # Look for latest version from previous minor release
6138 find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." "true"
6139 else
6140 ((breakfix=breakfix-1))
6141 if [ "${breakfix}" = "0" ]; then
6142 TARGET_GO_VERSION="${major}.${minor}"
6143 else
6144 TARGET_GO_VERSION="${major}.${minor}.${breakfix}"
6145 fi
6146 fi
6147 set -e
6148 echo "Trying ${TARGET_GO_VERSION}..."
6149 curl -fsSL -o /tmp/go.tar.gz "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz"
6150 fi
6151 curl -fsSL -o /tmp/go.tar.gz.asc "https://golang.org/dl/go${TARGET_GO_VERSION}.linux-${architecture}.tar.gz.asc"
6152 gpg --verify /tmp/go.tar.gz.asc /tmp/go.tar.gz
6153 echo "Extracting Go ${TARGET_GO_VERSION}..."
6154 tar -xzf /tmp/go.tar.gz -C "${TARGET_GOROOT}" --strip-components=1
6155 rm -rf /tmp/go.tar.gz /tmp/go.tar.gz.asc /tmp/tmp-gnupg
6156 else
6157 echo "(!) Go is already installed with version ${TARGET_GO_VERSION}. Skipping."
6158 fi
6159
6160 # Install Go tools that are isImportant && !replacedByGopls based on
6161 # https://github.com/golang/vscode-go/blob/v0.38.0/src/goToolsInformation.ts
6162 GO_TOOLS="\
6163 golang.org/x/tools/gopls@latest \
6164 honnef.co/go/tools/cmd/staticcheck@latest \
6165 golang.org/x/lint/golint@latest \
6166 github.com/mgechev/revive@latest \
6167 github.com/go-delve/delve/cmd/dlv@latest \
6168 github.com/fatih/gomodifytags@latest \
6169 github.com/haya14busa/goplay/cmd/goplay@latest \
6170 github.com/cweill/gotests/gotests@latest \
6171 github.com/josharian/impl@latest"
6172
6173 if [ "${INSTALL_GO_TOOLS}" = "true" ]; then
6174 echo "Installing common Go tools..."
6175 export PATH=${TARGET_GOROOT}/bin:${PATH}
6176 export GOPATH=/tmp/gotools
6177 export GOCACHE="${GOPATH}/cache"
6178
6179 mkdir -p "${GOPATH}" /usr/local/etc/vscode-dev-containers "${TARGET_GOPATH}/bin"
6180 cd "${GOPATH}"
6181
6182 # Use go get for versions of go under 1.16
6183 go_install_command=install
6184 if [[ "1.16" > "$(go version | grep -oP 'go\K[0-9]+\.[0-9]+(\.[0-9]+)?')" ]]; then
6185 export GO111MODULE=on
6186 go_install_command=get
6187 echo "Go version < 1.16, using go get."
6188 fi
6189
6190 (echo "${GO_TOOLS}" | xargs -n 1 go ${go_install_command} -v )2>&1 | tee -a /usr/local/etc/vscode-dev-containers/go.log
6191
6192 # Move Go tools into path
6193 if [ -d "${GOPATH}/bin" ]; then
6194 mv "${GOPATH}/bin"/* "${TARGET_GOPATH}/bin/"
6195 fi
6196
6197 # Install golangci-lint from precompiled binaries
6198 if [ "$GOLANGCILINT_VERSION" = "latest" ] || [ "$GOLANGCILINT_VERSION" = "" ]; then
6199 echo "Installing golangci-lint latest..."
6200 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6201 sh -s -- -b "${TARGET_GOPATH}/bin"
6202 else
6203 echo "Installing golangci-lint ${GOLANGCILINT_VERSION}..."
6204 curl -fsSL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | \
6205 sh -s -- -b "${TARGET_GOPATH}/bin" "v${GOLANGCILINT_VERSION}"
6206 fi
6207
6208 # Remove Go tools temp directory
6209 rm -rf "${GOPATH}"
6210 fi
6211
6212
6213 chown -R "${USERNAME}:golang" "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6214 chmod -R g+r+w "${TARGET_GOROOT}" "${TARGET_GOPATH}"
6215 find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s
6216 find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s
6217
6218 # Clean up
6219 clean_up
6220
6221 echo "Done!"
6222 "#),
6223 ])
6224 .await;
6225 return Ok(http::Response::builder()
6226 .status(200)
6227 .body(AsyncBody::from(response))
6228 .unwrap());
6229 }
        // Mock OCI registry response: the image manifest for the `aws-cli`
        // feature at tag "1". The layer digest below must match the blob
        // handler further down, and the `dev.containers.metadata` annotation
        // embeds the feature's devcontainer-feature.json as a JSON string,
        // mirroring how ghcr.io serves devcontainer features.
        if parts.uri.path() == "/v2/devcontainers/features/aws-cli/manifests/1" {
            let response = r#"
            {
                "schemaVersion": 2,
                "mediaType": "application/vnd.oci.image.manifest.v1+json",
                "config": {
                    "mediaType": "application/vnd.devcontainers",
                    "digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
                    "size": 2
                },
                "layers": [
                    {
                        "mediaType": "application/vnd.devcontainers.layer.v1+tar",
                        "digest": "sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13",
                        "size": 19968,
                        "annotations": {
                            "org.opencontainers.image.title": "devcontainer-feature-aws-cli.tgz"
                        }
                    }
                ],
                "annotations": {
                    "dev.containers.metadata": "{\"id\":\"aws-cli\",\"version\":\"1.1.3\",\"name\":\"AWS CLI\",\"documentationURL\":\"https://github.com/devcontainers/features/tree/main/src/aws-cli\",\"description\":\"Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.\",\"options\":{\"version\":{\"type\":\"string\",\"proposals\":[\"latest\"],\"default\":\"latest\",\"description\":\"Select or enter an AWS CLI version.\"},\"verbose\":{\"type\":\"boolean\",\"default\":true,\"description\":\"Suppress verbose output.\"}},\"customizations\":{\"vscode\":{\"extensions\":[\"AmazonWebServices.aws-toolkit-vscode\"],\"settings\":{\"github.copilot.chat.codeGeneration.instructions\":[{\"text\":\"This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development.\"}]}}},\"installsAfter\":[\"ghcr.io/devcontainers/features/common-utils\"]}",
                    "com.github.package.type": "devcontainer_feature"
                }
            }"#;
            // Serve the manifest verbatim with HTTP 200, as a registry would.
            return Ok(http::Response::builder()
                .status(200)
                .body(AsyncBody::from(response))
                .unwrap());
        }
        // Mock OCI blob download for the aws-cli feature layer: builds the
        // feature tarball on the fly (metadata JSON, install script, and
        // vendored shell-completion scripts). The digest in this path must
        // match the layer digest in the manifest response served above.
        // NOTE(review): the embedded file contents below are fixture data —
        // they are packaged into the tarball, not executed here.
        if parts.uri.path()
            == "/v2/devcontainers/features/aws-cli/blobs/sha256:4e9b04b394fb63e297b3d5f58185406ea45bddb639c2ba83b5a8394643cd5b13"
        {
            let response = build_tarball(vec![
                // Feature metadata; agrees with the `dev.containers.metadata`
                // annotation in the manifest endpoint above.
                (
                    "./devcontainer-feature.json",
                    r#"
{
    "id": "aws-cli",
    "version": "1.1.3",
    "name": "AWS CLI",
    "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli",
    "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.",
    "options": {
        "version": {
            "type": "string",
            "proposals": [
                "latest"
            ],
            "default": "latest",
            "description": "Select or enter an AWS CLI version."
        },
        "verbose": {
            "type": "boolean",
            "default": true,
            "description": "Suppress verbose output."
        }
    },
    "customizations": {
        "vscode": {
            "extensions": [
                "AmazonWebServices.aws-toolkit-vscode"
            ],
            "settings": {
                "github.copilot.chat.codeGeneration.instructions": [
                    {
                        "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development."
                    }
                ]
            }
        }
    },
    "installsAfter": [
        "ghcr.io/devcontainers/features/common-utils"
    ]
}
    "#,
                ),
                // The feature's install script as published upstream.
                (
                    "./install.sh",
                    r#"#!/usr/bin/env bash
    #-------------------------------------------------------------------------------------------------------------
    # Copyright (c) Microsoft Corporation. All rights reserved.
    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
    #-------------------------------------------------------------------------------------------------------------
    #
    # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/awscli.md
    # Maintainer: The VS Code and Codespaces Teams

    set -e

    # Clean up
    rm -rf /var/lib/apt/lists/*

    VERSION=${VERSION:-"latest"}
    VERBOSE=${VERBOSE:-"true"}

    AWSCLI_GPG_KEY=FB5DB77FD5C118B80511ADA8A6310ACC4672475C
    AWSCLI_GPG_KEY_MATERIAL="-----BEGIN PGP PUBLIC KEY BLOCK-----

    mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG
    ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx
    PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G
    TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz
    gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk
    C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG
    94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO
    lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG
    fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG
    EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX
    XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB
    tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7
    Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE
    FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM
    yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ
    MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox
    au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do
    ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B
    hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO
    tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H
    QYmVr3aIUse20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF
    RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB
    rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d
    H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe
    YLZATHZKTJyiqA==
    =vYOk
    -----END PGP PUBLIC KEY BLOCK-----"

    if [ "$(id -u)" -ne 0 ]; then
        echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.'
        exit 1
    fi

    apt_get_update()
    {
        if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then
            echo "Running apt-get update..."
            apt-get update -y
        fi
    }

    # Checks if packages are installed and installs them if not
    check_packages() {
        if ! dpkg -s "$@" > /dev/null 2>&1; then
            apt_get_update
            apt-get -y install --no-install-recommends "$@"
        fi
    }

    export DEBIAN_FRONTEND=noninteractive

    check_packages curl ca-certificates gpg dirmngr unzip bash-completion less

    verify_aws_cli_gpg_signature() {
        local filePath=$1
        local sigFilePath=$2
        local awsGpgKeyring=aws-cli-public-key.gpg

        echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}"
        gpg --batch --quiet --no-default-keyring --keyring "./${awsGpgKeyring}" --verify "${sigFilePath}" "${filePath}"
        local status=$?

        rm "./${awsGpgKeyring}"

        return ${status}
    }

    install() {
        local scriptZipFile=awscli.zip
        local scriptSigFile=awscli.sig

        # See Linux install docs at https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
        if [ "${VERSION}" != "latest" ]; then
            local versionStr=-${VERSION}
        fi
        architecture=$(dpkg --print-architecture)
        case "${architecture}" in
            amd64) architectureStr=x86_64 ;;
            arm64) architectureStr=aarch64 ;;
            *)
                echo "AWS CLI does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine."
                exit 1
        esac
        local scriptUrl=https://awscli.amazonaws.com/awscli-exe-linux-${architectureStr}${versionStr}.zip
        curl "${scriptUrl}" -o "${scriptZipFile}"
        curl "${scriptUrl}.sig" -o "${scriptSigFile}"

        verify_aws_cli_gpg_signature "$scriptZipFile" "$scriptSigFile"
        if (( $? > 0 )); then
            echo "Could not verify GPG signature of AWS CLI install script. Make sure you provided a valid version."
            exit 1
        fi

        if [ "${VERBOSE}" = "false" ]; then
            unzip -q "${scriptZipFile}"
        else
            unzip "${scriptZipFile}"
        fi

        ./aws/install

        # kubectl bash completion
        mkdir -p /etc/bash_completion.d
        cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws

        # kubectl zsh completion
        if [ -e "${USERHOME}/.oh-my-zsh" ]; then
            mkdir -p "${USERHOME}/.oh-my-zsh/completions"
            cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws"
            chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh"
        fi

        rm -rf ./aws
    }

    echo "(*) Installing AWS CLI..."

    install

    # Clean up
    rm -rf /var/lib/apt/lists/*

    echo "Done!""#,
                ),
                // Directory entries (empty payloads) so tarball extraction
                // creates the folder structure before the files inside it.
                ("./scripts/", r#""#),
                (
                    "./scripts/fetch-latest-completer-scripts.sh",
                    r#"
    #!/bin/bash
    #-------------------------------------------------------------------------------------------------------------
    # Copyright (c) Microsoft Corporation. All rights reserved.
    # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information.
    #-------------------------------------------------------------------------------------------------------------
    #
    # Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli
    # Maintainer: The Dev Container spec maintainers
    #
    # Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version
    #
    COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}")
    BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer"
    ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh"

    wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT"
    chmod +x "$BASH_COMPLETER_SCRIPT"

    wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT"
    chmod +x "$ZSH_COMPLETER_SCRIPT"
    "#,
                ),
                ("./scripts/vendor/", r#""#),
                (
                    "./scripts/vendor/aws_bash_completer",
                    r#"
    # Typically that would be added under one of the following paths:
    # - /etc/bash_completion.d
    # - /usr/local/etc/bash_completion.d
    # - /usr/share/bash-completion/completions

    complete -C aws_completer aws
    "#,
                ),
                (
                    "./scripts/vendor/aws_zsh_completer.sh",
                    r#"
    # Source this file to activate auto completion for zsh using the bash
    # compatibility helper. Make sure to run `compinit` before, which should be
    # given usually.
    #
    # % source /path/to/zsh_complete.sh
    #
    # Typically that would be called somewhere in your .zshrc.
    #
    # Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT
    # That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
    #
    # https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570
    #
    # zsh releases prior to that version do not export the required env variables!

    autoload -Uz bashcompinit
    bashcompinit -i

    _bash_complete() {
        local ret=1
        local -a suf matches
        local -x COMP_POINT COMP_CWORD
        local -a COMP_WORDS COMPREPLY BASH_VERSINFO
        local -x COMP_LINE="$words"
        local -A savejobstates savejobtexts

        (( COMP_POINT = 1 + ${#${(j. .)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX ))
        (( COMP_CWORD = CURRENT - 1))
        COMP_WORDS=( $words )
        BASH_VERSINFO=( 2 05b 0 1 release )

        savejobstates=( ${(kv)jobstates} )
        savejobtexts=( ${(kv)jobtexts} )

        [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' )

        matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} )

        if [[ -n $matches ]]; then
            if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then
                compset -P '*/' && matches=( ${matches##*/} )
                compset -S '/*' && matches=( ${matches%%/*} )
                compadd -Q -f "${suf[@]}" -a matches && ret=0
            else
                compadd -Q "${suf[@]}" -a matches && ret=0
            fi
        fi

        if (( ret )); then
            if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then
                _default "${suf[@]}" && ret=0
            elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then
                _directories "${suf[@]}" && ret=0
            fi
        fi

        return ret
    }

    complete -C aws_completer aws
    "#,
                ),
            ]).await;

            // Serve the generated tarball bytes with HTTP 200.
            return Ok(http::Response::builder()
                .status(200)
                .body(AsyncBody::from(response))
                .unwrap());
        }
6564
        // Fallback: any path not matched by the handlers above gets an empty
        // 404, so tests fail fast on unexpected registry requests.
        Ok(http::Response::builder()
            .status(404)
            .body(http_client::AsyncBody::default())
            .unwrap())
6569 })
6570 }
6571}