bootc_lib/deploy.rs

1//! # Write deployments merging image with configmap
2//!
3//! Create a merged filesystem tree with the image and mounted configmaps.
4
5use std::collections::HashSet;
6use std::io::{BufRead, Write};
7use std::process::Command;
8
9use anyhow::{Context, Result, anyhow};
10use bootc_kernel_cmdline::utf8::CmdlineOwned;
11use cap_std::fs::{Dir, MetadataExt};
12use cap_std_ext::cap_std;
13use cap_std_ext::dirext::CapStdExtDirExt;
14use fn_error_context::context;
15use ostree::{gio, glib};
16use ostree_container::OstreeImageReference;
17use ostree_ext::container as ostree_container;
18use ostree_ext::container::store::{ImageImporter, ImportProgress, PrepareResult, PreparedImport};
19use ostree_ext::oci_spec::image::{Descriptor, Digest};
20use ostree_ext::ostree::Deployment;
21use ostree_ext::ostree::{self, Sysroot};
22use ostree_ext::sysroot::SysrootLock;
23use ostree_ext::tokio_util::spawn_blocking_cancellable_flatten;
24
25use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep};
26use crate::spec::ImageReference;
27use crate::spec::{BootOrder, HostSpec};
28use crate::status::labels_of_config;
29use crate::store::Storage;
30use crate::utils::async_task_with_spinner;
31
32// TODO use https://github.com/ostreedev/ostree-rs-ext/pull/493/commits/afc1837ff383681b947de30c0cefc70080a4f87a
33const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage/bootc";
34
35/// Set on an ostree commit if this is a derived commit
36const BOOTC_DERIVED_KEY: &str = "bootc.derived";
37
/// Variant of [`HostSpec`] in which the otherwise-optional fields are required to be filled out
39pub(crate) struct RequiredHostSpec<'a> {
40    pub(crate) image: &'a ImageReference,
41}
42
43/// State of a locally fetched image
44pub(crate) struct ImageState {
45    pub(crate) manifest_digest: Digest,
46    pub(crate) version: Option<String>,
47    pub(crate) ostree_commit: String,
48}
49
50impl<'a> RequiredHostSpec<'a> {
51    /// Given a (borrowed) host specification, "unwrap" its internal
52    /// options, giving a spec that is required to have a base container image.
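    /// A minimal usage sketch (illustrative only; these types are crate-private, so this
    /// is not a runnable doctest, and `host` is a hypothetical `Host` value):
    ///
    /// ```ignore
    /// let spec = RequiredHostSpec::from_spec(&host.spec)?;
    /// println!("target image: {}", spec.image.image);
    /// ```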
53    pub(crate) fn from_spec(spec: &'a HostSpec) -> Result<Self> {
54        let image = spec
55            .image
56            .as_ref()
57            .ok_or_else(|| anyhow::anyhow!("Missing image in specification"))?;
58        Ok(Self { image })
59    }
60}
61
62impl From<ostree_container::store::LayeredImageState> for ImageState {
63    fn from(value: ostree_container::store::LayeredImageState) -> Self {
64        let version = value.version().map(|v| v.to_owned());
65        let ostree_commit = value.get_commit().to_owned();
66        Self {
67            manifest_digest: value.manifest_digest,
68            version,
69            ostree_commit,
70        }
71    }
72}
73
74impl ImageState {
75    /// Fetch the manifest corresponding to this image.  May not be available in all backends.
76    pub(crate) fn get_manifest(
77        &self,
78        repo: &ostree::Repo,
79    ) -> Result<Option<ostree_ext::oci_spec::image::ImageManifest>> {
80        ostree_container::store::query_image_commit(repo, &self.ostree_commit)
81            .map(|v| Some(v.manifest))
82    }
83}
84
/// Create an image importer for the given repository and image reference, requiring a bootable image.
86pub(crate) async fn new_importer(
87    repo: &ostree::Repo,
88    imgref: &ostree_container::OstreeImageReference,
89) -> Result<ostree_container::store::ImageImporter> {
90    let config = Default::default();
91    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
92    imp.require_bootable();
93    Ok(imp)
94}
95
/// Like [`new_importer`], but with a custom image proxy config (e.g. for unified storage).
97pub(crate) async fn new_importer_with_config(
98    repo: &ostree::Repo,
99    imgref: &ostree_container::OstreeImageReference,
100    config: ostree_ext::containers_image_proxy::ImageProxyConfig,
101) -> Result<ostree_container::store::ImageImporter> {
102    let mut imp = ostree_container::store::ImageImporter::new(repo, imgref, config).await?;
103    imp.require_bootable();
104    Ok(imp)
105}
106
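/// Check the image configuration for the bootc compatibility label, warning via the
/// systemd journal if the label is missing or carries an unknown value.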
107pub(crate) fn check_bootc_label(config: &ostree_ext::oci_spec::image::ImageConfiguration) {
108    if let Some(label) =
109        labels_of_config(config).and_then(|labels| labels.get(crate::metadata::BOOTC_COMPAT_LABEL))
110    {
111        match label.as_str() {
112            crate::metadata::COMPAT_LABEL_V1 => {}
113            o => crate::journal::journal_print(
114                libsystemd::logging::Priority::Warning,
115                &format!(
116                    "notice: Unknown {} value {}",
117                    crate::metadata::BOOTC_COMPAT_LABEL,
118                    o
119                ),
120            ),
121        }
122    } else {
123        crate::journal::journal_print(
124            libsystemd::logging::Priority::Warning,
125            &format!(
126                "notice: Image is missing label: {}",
127                crate::metadata::BOOTC_COMPAT_LABEL
128            ),
129        )
130    }
131}
132
133fn descriptor_of_progress(p: &ImportProgress) -> &Descriptor {
134    match p {
135        ImportProgress::OstreeChunkStarted(l) => l,
136        ImportProgress::OstreeChunkCompleted(l) => l,
137        ImportProgress::DerivedLayerStarted(l) => l,
138        ImportProgress::DerivedLayerCompleted(l) => l,
139    }
140}
141
142fn prefix_of_progress(p: &ImportProgress) -> &'static str {
143    match p {
144        ImportProgress::OstreeChunkStarted(_) | ImportProgress::OstreeChunkCompleted(_) => {
145            "ostree chunk"
146        }
147        ImportProgress::DerivedLayerStarted(_) | ImportProgress::DerivedLayerCompleted(_) => {
148            "layer"
149        }
150    }
151}
152
153/// Configuration for layer progress printing
154struct LayerProgressConfig {
155    layers: tokio::sync::mpsc::Receiver<ostree_container::store::ImportProgress>,
156    layer_bytes: tokio::sync::watch::Receiver<Option<ostree_container::store::LayerProgress>>,
157    digest: Box<str>,
158    n_layers_to_fetch: usize,
159    layers_total: usize,
160    bytes_to_download: u64,
161    bytes_total: u64,
162    prog: ProgressWriter,
163    quiet: bool,
164}
165
166/// Write container fetch progress to standard output.
167async fn handle_layer_progress_print(mut config: LayerProgressConfig) -> ProgressWriter {
168    let start = std::time::Instant::now();
169    let mut total_read = 0u64;
170    let bar = indicatif::MultiProgress::new();
171    if config.quiet {
172        bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
173    }
174    let layers_bar = bar.add(indicatif::ProgressBar::new(
175        config.n_layers_to_fetch.try_into().unwrap(),
176    ));
177    let byte_bar = bar.add(indicatif::ProgressBar::new(0));
178    // let byte_bar = indicatif::ProgressBar::new(0);
179    // byte_bar.set_draw_target(indicatif::ProgressDrawTarget::hidden());
180    layers_bar.set_style(
181        indicatif::ProgressStyle::default_bar()
182            .template("{prefix} {bar} {pos}/{len} {wide_msg}")
183            .unwrap(),
184    );
185    let taskname = "Fetching layers";
186    layers_bar.set_prefix(taskname);
187    layers_bar.set_message("");
188    byte_bar.set_prefix("Fetching");
189    byte_bar.set_style(
190        indicatif::ProgressStyle::default_bar()
191                .template(
192                    " └ {prefix} {bar} {binary_bytes}/{binary_total_bytes} ({binary_bytes_per_sec}) {wide_msg}",
193                )
194                .unwrap()
195        );
196
197    let mut subtasks = vec![];
198    let mut subtask: SubTaskBytes = Default::default();
199    loop {
200        tokio::select! {
201            // Always handle layer changes first.
202            biased;
203            layer = config.layers.recv() => {
204                if let Some(l) = layer {
205                    let layer = descriptor_of_progress(&l);
206                    let layer_type = prefix_of_progress(&l);
207                    let short_digest = &layer.digest().digest()[0..21];
208                    let layer_size = layer.size();
209                    if l.is_starting() {
210                        // Reset the progress bar
211                        byte_bar.reset_elapsed();
212                        byte_bar.reset_eta();
213                        byte_bar.set_length(layer_size);
214                        byte_bar.set_message(format!("{layer_type} {short_digest}"));
215
216                        subtask = SubTaskBytes {
217                            subtask: layer_type.into(),
218                            description: format!("{layer_type}: {short_digest}").clone().into(),
219                            id: short_digest.to_string().clone().into(),
220                            bytes_cached: 0,
221                            bytes: 0,
222                            bytes_total: layer_size,
223                        };
224                    } else {
225                        byte_bar.set_position(layer_size);
226                        layers_bar.inc(1);
227                        total_read = total_read.saturating_add(layer_size);
228                        // Emit an event where bytes == total to signal completion.
229                        subtask.bytes = layer_size;
230                        subtasks.push(subtask.clone());
231                        config.prog.send(Event::ProgressBytes {
232                            task: "pulling".into(),
233                            description: format!("Pulling Image: {}", config.digest).into(),
234                            id: (*config.digest).into(),
235                            bytes_cached: config.bytes_total - config.bytes_to_download,
236                            bytes: total_read,
237                            bytes_total: config.bytes_to_download,
238                            steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
239                            steps: layers_bar.position(),
240                            steps_total: config.n_layers_to_fetch as u64,
241                            subtasks: subtasks.clone(),
242                        }).await;
243                    }
244                } else {
245                    // If the receiver is disconnected, then we're done
246                    break
247                };
248            },
249            r = config.layer_bytes.changed() => {
250                if r.is_err() {
251                    // If the receiver is disconnected, then we're done
252                    break
253                }
254                let bytes = {
255                    let bytes = config.layer_bytes.borrow_and_update();
256                    bytes.as_ref().cloned()
257                };
258                if let Some(bytes) = bytes {
259                    byte_bar.set_position(bytes.fetched);
260                    subtask.bytes = byte_bar.position();
261                    config.prog.send_lossy(Event::ProgressBytes {
262                        task: "pulling".into(),
263                        description: format!("Pulling Image: {}", config.digest).into(),
264                        id: (*config.digest).into(),
265                        bytes_cached: config.bytes_total - config.bytes_to_download,
266                        bytes: total_read + byte_bar.position(),
267                        bytes_total: config.bytes_to_download,
268                        steps_cached: (config.layers_total - config.n_layers_to_fetch) as u64,
269                        steps: layers_bar.position(),
270                        steps_total: config.n_layers_to_fetch as u64,
271                        subtasks: subtasks.clone().into_iter().chain([subtask.clone()]).collect(),
272                    }).await;
273                }
274            }
275        }
276    }
277    byte_bar.finish_and_clear();
278    layers_bar.finish_and_clear();
279    if let Err(e) = bar.clear() {
280        tracing::warn!("clearing bar: {e}");
281    }
282    let end = std::time::Instant::now();
283    let elapsed = end.duration_since(start);
284    let persec = total_read as f64 / elapsed.as_secs_f64();
285    let persec = indicatif::HumanBytes(persec as u64);
286    if let Err(e) = bar.println(&format!(
287        "Fetched layers: {} in {} ({}/s)",
288        indicatif::HumanBytes(total_read),
289        indicatif::HumanDuration(elapsed),
290        persec,
291    )) {
292        tracing::warn!("writing to stdout: {e}");
293    }
294
    // Since the progress notifier channel has closed, we know the fetch loop is done and
    // the import has begun; use that as a heuristic to start reporting import progress.
    // This send cannot be lossy, or the event could be dropped.
298    config
299        .prog
300        .send(Event::ProgressSteps {
301            task: "importing".into(),
302            description: "Importing Image".into(),
303            id: (*config.digest).into(),
304            steps_cached: 0,
305            steps: 0,
306            steps_total: 1,
307            subtasks: [SubTaskStep {
308                subtask: "importing".into(),
309                description: "Importing Image".into(),
310                id: "importing".into(),
311                completed: false,
312            }]
313            .into(),
314        })
315        .await;
316
317    // Return the writer
318    config.prog
319}
320
321/// Gather all bound images in all deployments, then prune the image store,
322/// using the gathered images as the roots (that will not be GC'd).
323pub(crate) async fn prune_container_store(sysroot: &Storage) -> Result<()> {
324    let ostree = sysroot.get_ostree()?;
325    let deployments = ostree.deployments();
326    let mut all_bound_images = Vec::new();
327    for deployment in deployments {
328        let bound = crate::boundimage::query_bound_images_for_deployment(ostree, &deployment)?;
329        all_bound_images.extend(bound.into_iter());
330        // Also include the host image itself
331        // Note: Use just the image name (not the full transport:image format) because
332        // podman's image names don't include the transport prefix.
333        if let Some(host_image) = crate::status::boot_entry_from_deployment(ostree, &deployment)?
334            .image
335            .map(|i| i.image)
336        {
337            all_bound_images.push(crate::boundimage::BoundImage {
338                image: host_image.image.clone(),
339                auth_file: None,
340            });
341        }
342    }
343    // Convert to a hashset of just the image names
344    let image_names = HashSet::from_iter(all_bound_images.iter().map(|img| img.image.as_str()));
345    let pruned = sysroot
346        .get_ensure_imgstore()?
347        .prune_except_roots(&image_names)
348        .await?;
349    tracing::debug!("Pruned images: {}", pruned.len());
350    Ok(())
351}
352
353pub(crate) struct PreparedImportMeta {
354    pub imp: ImageImporter,
355    pub prep: Box<PreparedImport>,
356    pub digest: Digest,
357    pub n_layers_to_fetch: usize,
358    pub layers_total: usize,
359    pub bytes_to_fetch: u64,
360    pub bytes_total: u64,
361}
362
363pub(crate) enum PreparedPullResult {
364    Ready(Box<PreparedImportMeta>),
365    AlreadyPresent(Box<ImageState>),
366}
367
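/// Prepare pulling `imgref` into the ostree repository: canonicalize the reference,
/// verify the compatibility label, and compute how many layers and bytes still need to
/// be fetched. Returns [`PreparedPullResult::AlreadyPresent`] if there is nothing to pull.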
368pub(crate) async fn prepare_for_pull(
369    repo: &ostree::Repo,
370    imgref: &ImageReference,
371    target_imgref: Option<&OstreeImageReference>,
372) -> Result<PreparedPullResult> {
373    let imgref_canonicalized = imgref.clone().canonicalize()?;
374    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");
375    let ostree_imgref = &OstreeImageReference::from(imgref_canonicalized);
376    let mut imp = new_importer(repo, ostree_imgref).await?;
377    if let Some(target) = target_imgref {
378        imp.set_target(target);
379    }
380    let prep = match imp.prepare().await? {
381        PrepareResult::AlreadyPresent(c) => {
382            println!("No changes in {imgref:#} => {}", c.manifest_digest);
383            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
384        }
385        PrepareResult::Ready(p) => p,
386    };
387    check_bootc_label(&prep.config);
388    if let Some(warning) = prep.deprecated_warning() {
389        ostree_ext::cli::print_deprecated_warning(warning).await;
390    }
391    ostree_ext::cli::print_layer_status(&prep);
392    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;
393
394    let prepared_image = PreparedImportMeta {
395        imp,
396        n_layers_to_fetch: layers_to_fetch.len(),
397        layers_total: prep.all_layers().count(),
398        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
399        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
400        digest: prep.manifest_digest.clone(),
401        prep,
402    };
403
404    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
405}
406
407/// Check whether the image exists in bootc's unified container storage.
408///
409/// This is used for auto-detection: if the image already exists in bootc storage
410/// (e.g., from a previous `bootc image set-unified` or LBI pull), we can use
411/// the unified storage path for faster imports.
412///
413/// Returns true if the image exists in bootc storage.
414pub(crate) async fn image_exists_in_unified_storage(
415    store: &Storage,
416    imgref: &ImageReference,
417) -> Result<bool> {
418    let imgstore = store.get_ensure_imgstore()?;
419    let image_ref_str = imgref.to_transport_image()?;
420    imgstore.exists(&image_ref_str).await
421}
422
423/// Unified approach: Use bootc's CStorage to pull the image, then prepare from containers-storage.
424/// This reuses the same infrastructure as LBIs.
425pub(crate) async fn prepare_for_pull_unified(
426    repo: &ostree::Repo,
427    imgref: &ImageReference,
428    target_imgref: Option<&OstreeImageReference>,
429    store: &Storage,
430) -> Result<PreparedPullResult> {
431    // Get or initialize the bootc container storage (same as used for LBIs)
432    let imgstore = store.get_ensure_imgstore()?;
433
434    let image_ref_str = imgref.to_transport_image()?;
435
436    // Always pull to ensure we have the latest image, whether from a remote
437    // registry or a locally rebuilt image
438    tracing::info!(
439        "Unified pull: pulling from transport '{}' to bootc storage",
440        &imgref.transport
441    );
442
443    // Pull the image to bootc storage using the same method as LBIs
444    // Show a spinner since podman pull can take a while and doesn't output progress
445    let pull_msg = format!("Pulling {} to bootc storage", &image_ref_str);
446    async_task_with_spinner(&pull_msg, async move {
447        imgstore
448            .pull(&image_ref_str, crate::podstorage::PullMode::Always)
449            .await
450    })
451    .await?;
452
453    // Now create a containers-storage reference to read from bootc storage
454    tracing::info!("Unified pull: now importing from containers-storage transport");
455    let containers_storage_imgref = ImageReference {
456        transport: "containers-storage".to_string(),
457        image: imgref.image.clone(),
458        signature: imgref.signature.clone(),
459    };
460    let ostree_imgref = OstreeImageReference::from(containers_storage_imgref);
461
462    // Configure the importer to use bootc storage as an additional image store
463    let mut config = ostree_ext::containers_image_proxy::ImageProxyConfig::default();
464    let mut cmd = Command::new("skopeo");
465    // Use the physical path to bootc storage from the Storage struct
466    let storage_path = format!(
467        "{}/{}",
468        store.physical_root_path,
469        crate::podstorage::CStorage::subpath()
470    );
471    crate::podstorage::set_additional_image_store(&mut cmd, &storage_path);
472    config.skopeo_cmd = Some(cmd);
473
474    // Use the preparation flow with the custom config
475    let mut imp = new_importer_with_config(repo, &ostree_imgref, config).await?;
476    if let Some(target) = target_imgref {
477        imp.set_target(target);
478    }
479    let prep = match imp.prepare().await? {
480        PrepareResult::AlreadyPresent(c) => {
481            println!("No changes in {imgref:#} => {}", c.manifest_digest);
482            return Ok(PreparedPullResult::AlreadyPresent(Box::new((*c).into())));
483        }
484        PrepareResult::Ready(p) => p,
485    };
486    check_bootc_label(&prep.config);
487    if let Some(warning) = prep.deprecated_warning() {
488        ostree_ext::cli::print_deprecated_warning(warning).await;
489    }
490    ostree_ext::cli::print_layer_status(&prep);
491    let layers_to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?;
492
493    // Log that we're importing a new image from containers-storage
494    const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
495    tracing::info!(
496        message_id = PULLING_NEW_IMAGE_ID,
497        bootc.image.reference = &imgref.image,
498        bootc.image.transport = "containers-storage",
499        bootc.original_transport = &imgref.transport,
500        bootc.status = "importing_from_storage",
501        "Importing image from bootc storage: {}",
502        ostree_imgref
503    );
504
505    let prepared_image = PreparedImportMeta {
506        imp,
507        n_layers_to_fetch: layers_to_fetch.len(),
508        layers_total: prep.all_layers().count(),
509        bytes_to_fetch: layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(),
510        bytes_total: prep.all_layers().map(|l| l.layer.size()).sum(),
511        digest: prep.manifest_digest.clone(),
512        prep,
513    };
514
515    Ok(PreparedPullResult::Ready(Box::new(prepared_image)))
516}
517
518/// Unified pull: Use podman to pull to containers-storage, then read from there
519pub(crate) async fn pull_unified(
520    repo: &ostree::Repo,
521    imgref: &ImageReference,
522    target_imgref: Option<&OstreeImageReference>,
523    quiet: bool,
524    prog: ProgressWriter,
525    store: &Storage,
526) -> Result<Box<ImageState>> {
527    match prepare_for_pull_unified(repo, imgref, target_imgref, store).await? {
528        PreparedPullResult::AlreadyPresent(existing) => {
529            // Log that the image was already present (Debug level since it's not actionable)
530            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
531            tracing::debug!(
532                message_id = IMAGE_ALREADY_PRESENT_ID,
533                bootc.image.reference = &imgref.image,
534                bootc.image.transport = &imgref.transport,
535                bootc.status = "already_present",
536                "Image already present: {}",
537                imgref
538            );
539            Ok(existing)
540        }
541        PreparedPullResult::Ready(prepared_image_meta) => {
542            // To avoid duplicate success logs, pass a containers-storage imgref to the importer
543            let cs_imgref = ImageReference {
544                transport: "containers-storage".to_string(),
545                image: imgref.image.clone(),
546                signature: imgref.signature.clone(),
547            };
548            pull_from_prepared(&cs_imgref, quiet, prog, *prepared_image_meta).await
549        }
550    }
551}
552
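/// Complete a prepared pull: drive the layer fetch with progress reporting, import the
/// image into the ostree repository, and return the resulting [`ImageState`].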
553#[context("Pulling")]
554pub(crate) async fn pull_from_prepared(
555    imgref: &ImageReference,
556    quiet: bool,
557    prog: ProgressWriter,
558    mut prepared_image: PreparedImportMeta,
559) -> Result<Box<ImageState>> {
560    let layer_progress = prepared_image.imp.request_progress();
561    let layer_byte_progress = prepared_image.imp.request_layer_progress();
562    let digest = prepared_image.digest.clone();
563    let digest_imp = prepared_image.digest.clone();
564
565    let printer = tokio::task::spawn(async move {
566        handle_layer_progress_print(LayerProgressConfig {
567            layers: layer_progress,
568            layer_bytes: layer_byte_progress,
569            digest: digest.as_ref().into(),
570            n_layers_to_fetch: prepared_image.n_layers_to_fetch,
571            layers_total: prepared_image.layers_total,
572            bytes_to_download: prepared_image.bytes_to_fetch,
573            bytes_total: prepared_image.bytes_total,
574            prog,
575            quiet,
576        })
577        .await
578    });
579    let import = prepared_image.imp.import(prepared_image.prep).await;
580    let prog = printer.await?;
    // Both the import future and the progress printer have completed, so signal that importing is done
582    prog.send(Event::ProgressSteps {
583        task: "importing".into(),
584        description: "Importing Image".into(),
585        id: digest_imp.clone().as_ref().into(),
586        steps_cached: 0,
587        steps: 1,
588        steps_total: 1,
589        subtasks: [SubTaskStep {
590            subtask: "importing".into(),
591            description: "Importing Image".into(),
592            id: "importing".into(),
593            completed: true,
594        }]
595        .into(),
596    })
597    .await;
598    let import = import?;
599    let imgref_canonicalized = imgref.clone().canonicalize()?;
600    tracing::debug!("Canonicalized image reference: {imgref_canonicalized:#}");
601
602    // Log successful import completion (skip if using unified storage to avoid double logging)
603    let is_unified_path = imgref.transport == "containers-storage";
604    if !is_unified_path {
605        const IMPORT_COMPLETE_JOURNAL_ID: &str = "4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8";
606
607        tracing::info!(
608            message_id = IMPORT_COMPLETE_JOURNAL_ID,
609            bootc.image.reference = &imgref.image,
610            bootc.image.transport = &imgref.transport,
611            bootc.manifest_digest = import.manifest_digest.as_ref(),
612            bootc.ostree_commit = &import.merge_commit,
613            "Successfully imported image: {}",
614            imgref
615        );
616    }
617
618    if let Some(msg) =
619        ostree_container::store::image_filtered_content_warning(&import.filtered_files)
620            .context("Image content warning")?
621    {
622        tracing::info!("{}", msg);
623    }
624    Ok(Box::new((*import).into()))
625}
626
627/// Wrapper for pulling a container image, wiring up status output.
628pub(crate) async fn pull(
629    repo: &ostree::Repo,
630    imgref: &ImageReference,
631    target_imgref: Option<&OstreeImageReference>,
632    quiet: bool,
633    prog: ProgressWriter,
634) -> Result<Box<ImageState>> {
635    match prepare_for_pull(repo, imgref, target_imgref).await? {
636        PreparedPullResult::AlreadyPresent(existing) => {
637            // Log that the image was already present (Debug level since it's not actionable)
638            const IMAGE_ALREADY_PRESENT_ID: &str = "5c4d3e2f1a0b9c8d7e6f5a4b3c2d1e0f9";
639            tracing::debug!(
640                message_id = IMAGE_ALREADY_PRESENT_ID,
641                bootc.image.reference = &imgref.image,
642                bootc.image.transport = &imgref.transport,
643                bootc.status = "already_present",
644                "Image already present: {}",
645                imgref
646            );
647            Ok(existing)
648        }
649        PreparedPullResult::Ready(prepared_image_meta) => {
650            // Log that we're pulling a new image
651            const PULLING_NEW_IMAGE_ID: &str = "6d5e4f3a2b1c0d9e8f7a6b5c4d3e2f1a0";
652            tracing::info!(
653                message_id = PULLING_NEW_IMAGE_ID,
654                bootc.image.reference = &imgref.image,
655                bootc.image.transport = &imgref.transport,
656                bootc.status = "pulling_new",
657                "Pulling new image: {}",
658                imgref
659            );
660            Ok(pull_from_prepared(imgref, quiet, prog, *prepared_image_meta).await?)
661        }
662    }
663}
664
665pub(crate) async fn wipe_ostree(sysroot: Sysroot) -> Result<()> {
666    tokio::task::spawn_blocking(move || {
667        sysroot
668            .write_deployments(&[], gio::Cancellable::NONE)
669            .context("removing deployments")
670    })
671    .await??;
672
673    Ok(())
674}
675
676pub(crate) async fn cleanup(sysroot: &Storage) -> Result<()> {
677    // Log the cleanup operation to systemd journal
678    const CLEANUP_JOURNAL_ID: &str = "2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7c6";
679
680    tracing::info!(
681        message_id = CLEANUP_JOURNAL_ID,
682        "Starting cleanup of old images and deployments"
683    );
684
685    let bound_prune = prune_container_store(sysroot);
686
687    // We create clones (just atomic reference bumps) here to move to the thread.
688    let ostree = sysroot.get_ostree_cloned()?;
689    let repo = ostree.repo();
690    let repo_prune =
691        ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
692            let locked_sysroot = &SysrootLock::from_assumed_locked(&ostree);
693            let cancellable = Some(cancellable);
694            let repo = &repo;
695            let txn = repo.auto_transaction(cancellable)?;
696            let repo = txn.repo();
697
698            // Regenerate our base references.  First, we delete the ones that exist
699            for ref_entry in repo
700                .list_refs_ext(
701                    Some(BASE_IMAGE_PREFIX),
702                    ostree::RepoListRefsExtFlags::NONE,
703                    cancellable,
704                )
705                .context("Listing refs")?
706                .keys()
707            {
708                repo.transaction_set_refspec(ref_entry, None);
709            }
710
711            // Then, for each deployment which is derived (e.g. has configmaps) we synthesize
712            // a base ref to ensure that it's not GC'd.
713            for (i, deployment) in ostree.deployments().into_iter().enumerate() {
714                let commit = deployment.csum();
715                if let Some(base) = get_base_commit(repo, &commit)? {
716                    repo.transaction_set_refspec(&format!("{BASE_IMAGE_PREFIX}/{i}"), Some(&base));
717                }
718            }
719
720            let pruned =
721                ostree_container::deploy::prune(locked_sysroot).context("Pruning images")?;
722            if !pruned.is_empty() {
723                let size = glib::format_size(pruned.objsize);
724                println!(
725                    "Pruned images: {} (layers: {}, objsize: {})",
726                    pruned.n_images, pruned.n_layers, size
727                );
728            } else {
729                tracing::debug!("Nothing to prune");
730            }
731
732            Ok(())
733        });
734
735    // We run these in parallel mostly because we can.
736    tokio::try_join!(repo_prune, bound_prune)?;
737    Ok(())
738}
739
740/// If commit is a bootc-derived commit (e.g. has configmaps), return its base.
741#[context("Finding base commit")]
742pub(crate) fn get_base_commit(repo: &ostree::Repo, commit: &str) -> Result<Option<String>> {
743    let commitv = repo.load_commit(commit)?.0;
744    let commitmeta = commitv.child_value(0);
745    let commitmeta = &glib::VariantDict::new(Some(&commitmeta));
746    let r = commitmeta.lookup::<String>(BOOTC_DERIVED_KEY)?;
747    Ok(r)
748}
749
750#[context("Writing deployment")]
751async fn deploy(
752    sysroot: &Storage,
753    from: MergeState,
754    image: &ImageState,
755    origin: &glib::KeyFile,
756    lock_finalization: bool,
757) -> Result<Deployment> {
758    // Compute the kernel argument overrides. In practice today this API is always expecting
759    // a merge deployment. The kargs code also always looks at the booted root (which
760    // is a distinct minor issue, but not super important as right now the install path
761    // doesn't use this API).
762    let (stateroot, override_kargs) = match &from {
763        MergeState::MergeDeployment(deployment) => {
764            let kargs = crate::bootc_kargs::get_kargs(sysroot, &deployment, image)?;
765            (deployment.stateroot().into(), Some(kargs))
766        }
767        MergeState::Reset { stateroot, kargs } => (stateroot.clone(), Some(kargs.clone())),
768    };
769    // Clone all the things to move to worker thread
770    let ostree = sysroot.get_ostree_cloned()?;
771    // ostree::Deployment is incorrectly !Send 😢 so convert it to an integer
772    let merge_deployment = from.as_merge_deployment();
773    let merge_deployment = merge_deployment.map(|d| d.index() as usize);
774    let ostree_commit = image.ostree_commit.to_string();
775    // GKeyFile also isn't Send! So we serialize that as a string...
776    let origin_data = origin.to_data();
777    let r = async_task_with_spinner(
778        "Deploying",
779        spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> {
780            let ostree = ostree;
781            let stateroot = Some(stateroot);
782            let mut opts = ostree::SysrootDeployTreeOpts::default();
783
784            // Set finalization lock if requested
785            opts.locked = lock_finalization;
786
787            // Because the C API expects a Vec<&str>, convert the Cmdline to string slices.
788            // The references borrow from the Cmdline, which outlives this usage.
789            let override_kargs_refs = override_kargs
790                .as_ref()
791                .map(|kargs| kargs.iter_str().collect::<Vec<_>>());
792            if let Some(kargs) = override_kargs_refs.as_ref() {
793                opts.override_kernel_argv = Some(kargs);
794            }
795
796            let deployments = ostree.deployments();
797            let merge_deployment = merge_deployment.map(|m| &deployments[m]);
798            let origin = glib::KeyFile::new();
799            origin.load_from_data(&origin_data, glib::KeyFileFlags::NONE)?;
800            let d = ostree.stage_tree_with_options(
801                stateroot.as_deref(),
802                &ostree_commit,
803                Some(&origin),
804                merge_deployment,
805                &opts,
806                Some(cancellable),
807            )?;
808            Ok(d.index())
809        }),
810    )
811    .await?;
812    // SAFETY: We must have a staged deployment
813    let ostree = sysroot.get_ostree()?;
814    let staged = ostree.staged_deployment().unwrap();
815    assert_eq!(staged.index(), r);
816    Ok(staged)
817}
818
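/// Build an ostree origin keyfile whose `[origin]` group records the container image
/// reference under the key named by `ostree_container::deploy::ORIGIN_CONTAINER`.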
819#[context("Generating origin")]
820fn origin_from_imageref(imgref: &ImageReference) -> Result<glib::KeyFile> {
821    let origin = glib::KeyFile::new();
822    let imgref = OstreeImageReference::from(imgref.clone());
823    origin.set_string(
824        "origin",
825        ostree_container::deploy::ORIGIN_CONTAINER,
826        imgref.to_string().as_str(),
827    );
828    Ok(origin)
829}
830
831/// The source of data for staging a new deployment
832#[derive(Debug)]
833pub(crate) enum MergeState {
834    /// Use the provided merge deployment
835    MergeDeployment(Deployment),
836    /// Don't use a merge deployment, but only this
837    /// provided initial state.
838    Reset {
839        stateroot: String,
840        kargs: CmdlineOwned,
841    },
842}
843impl MergeState {
844    /// Initialize using the default merge deployment for the given stateroot.
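    /// A minimal sketch (illustrative, not a doctest, since [`Storage`] requires a real
    /// ostree sysroot; `"default"` is a hypothetical stateroot name):
    ///
    /// ```ignore
    /// let merge = MergeState::from_stateroot(&storage, "default")?;
    /// assert!(merge.as_merge_deployment().is_some());
    /// ```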
845    pub(crate) fn from_stateroot(sysroot: &Storage, stateroot: &str) -> Result<Self> {
846        let ostree = sysroot.get_ostree()?;
847        let merge_deployment = ostree.merge_deployment(Some(stateroot)).ok_or_else(|| {
848            anyhow::anyhow!("No merge deployment found for stateroot {stateroot}")
849        })?;
850        Ok(Self::MergeDeployment(merge_deployment))
851    }
852
853    /// Cast this to a merge deployment case.
854    pub(crate) fn as_merge_deployment(&self) -> Option<&Deployment> {
855        match self {
856            Self::MergeDeployment(d) => Some(d),
857            Self::Reset { .. } => None,
858        }
859    }
860}
861
862/// Stage (queue deployment of) a fetched container image.
863#[context("Staging")]
864pub(crate) async fn stage(
865    sysroot: &Storage,
866    from: MergeState,
867    image: &ImageState,
868    spec: &RequiredHostSpec<'_>,
869    prog: ProgressWriter,
870    lock_finalization: bool,
871) -> Result<()> {
872    // Log the staging operation to systemd journal with comprehensive upgrade information
873    const STAGE_JOURNAL_ID: &str = "8f7a2b1c3d4e5f6a7b8c9d0e1f2a3b4c";
874
875    tracing::info!(
876        message_id = STAGE_JOURNAL_ID,
877        bootc.image.reference = &spec.image.image,
878        bootc.image.transport = &spec.image.transport,
879        bootc.manifest_digest = image.manifest_digest.as_ref(),
880        "Staging image for deployment: {} (digest: {})",
881        spec.image,
882        image.manifest_digest
883    );
884
885    let mut subtask = SubTaskStep {
886        subtask: "merging".into(),
887        description: "Merging Image".into(),
888        id: "fetching".into(),
889        completed: false,
890    };
891    let mut subtasks = vec![];
892    prog.send(Event::ProgressSteps {
893        task: "staging".into(),
894        description: "Deploying Image".into(),
895        id: image.manifest_digest.clone().as_ref().into(),
896        steps_cached: 0,
897        steps: 0,
898        steps_total: 3,
899        subtasks: subtasks
900            .clone()
901            .into_iter()
902            .chain([subtask.clone()])
903            .collect(),
904    })
905    .await;
906
907    subtask.completed = true;
908    subtasks.push(subtask.clone());
909    subtask.subtask = "deploying".into();
910    subtask.id = "deploying".into();
911    subtask.description = "Deploying Image".into();
912    subtask.completed = false;
913    prog.send(Event::ProgressSteps {
914        task: "staging".into(),
915        description: "Deploying Image".into(),
916        id: image.manifest_digest.clone().as_ref().into(),
917        steps_cached: 0,
918        steps: 1,
919        steps_total: 3,
920        subtasks: subtasks
921            .clone()
922            .into_iter()
923            .chain([subtask.clone()])
924            .collect(),
925    })
926    .await;
927    let origin = origin_from_imageref(spec.image)?;
928    let deployment =
929        crate::deploy::deploy(sysroot, from, image, &origin, lock_finalization).await?;
930
931    subtask.completed = true;
932    subtasks.push(subtask.clone());
933    subtask.subtask = "bound_images".into();
934    subtask.id = "bound_images".into();
935    subtask.description = "Pulling Bound Images".into();
936    subtask.completed = false;
937    prog.send(Event::ProgressSteps {
938        task: "staging".into(),
939        description: "Deploying Image".into(),
940        id: image.manifest_digest.clone().as_ref().into(),
941        steps_cached: 0,
942        steps: 1,
943        steps_total: 3,
944        subtasks: subtasks
945            .clone()
946            .into_iter()
947            .chain([subtask.clone()])
948            .collect(),
949    })
950    .await;
951    crate::boundimage::pull_bound_images(sysroot, &deployment).await?;
952
953    subtask.completed = true;
954    subtasks.push(subtask.clone());
955    subtask.subtask = "cleanup".into();
956    subtask.id = "cleanup".into();
957    subtask.description = "Removing old images".into();
958    subtask.completed = false;
959    prog.send(Event::ProgressSteps {
960        task: "staging".into(),
961        description: "Deploying Image".into(),
962        id: image.manifest_digest.clone().as_ref().into(),
963        steps_cached: 0,
964        steps: 2,
965        steps_total: 3,
966        subtasks: subtasks
967            .clone()
968            .into_iter()
969            .chain([subtask.clone()])
970            .collect(),
971    })
972    .await;
973    crate::deploy::cleanup(sysroot).await?;
974    println!("Queued for next boot: {:#}", spec.image);
975    if let Some(version) = image.version.as_deref() {
976        println!("  Version: {version}");
977    }
978    println!("  Digest: {}", image.manifest_digest);
979
980    subtask.completed = true;
981    subtasks.push(subtask.clone());
982    prog.send(Event::ProgressSteps {
983        task: "staging".into(),
984        description: "Deploying Image".into(),
985        id: image.manifest_digest.clone().as_ref().into(),
986        steps_cached: 0,
987        steps: 3,
988        steps_total: 3,
989        subtasks: subtasks
990            .clone()
991            .into_iter()
992            .chain([subtask.clone()])
993            .collect(),
994    })
995    .await;
996
997    // Unconditionally create or update /run/reboot-required to signal a reboot is needed.
998    // This is monitored by kured (Kubernetes Reboot Daemon).
999    write_reboot_required(&image.manifest_digest.as_ref())?;
1000
1001    Ok(())
1002}
1003
1004/// Update the /run/reboot-required file with the image that will be active after a reboot.
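/// The file contains a single line of the form
/// `bootc: Reboot required for image: <image>`.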
1005fn write_reboot_required(image: &str) -> Result<()> {
1006    let reboot_message = format!("bootc: Reboot required for image: {}", image);
1007    let run_dir = Dir::open_ambient_dir("/run", cap_std::ambient_authority())?;
1008    run_dir
1009        .atomic_write("reboot-required", reboot_message.as_bytes())
1010        .context("Creating /run/reboot-required")?;
1011
1012    Ok(())
1013}
1014
1015/// Implementation of rollback functionality
1016pub(crate) async fn rollback(sysroot: &Storage) -> Result<()> {
1017    const ROLLBACK_JOURNAL_ID: &str = "26f3b1eb24464d12aa5e7b544a6b5468";
1018    let ostree = sysroot.get_ostree()?;
1019    let (booted_ostree, deployments, host) = crate::status::get_status_require_booted(ostree)?;
1020
1021    let new_spec = {
1022        let mut new_spec = host.spec.clone();
1023        new_spec.boot_order = new_spec.boot_order.swap();
1024        new_spec
1025    };
1026
1027    let repo = &booted_ostree.repo();
1028
1029    // Just to be sure
1030    host.spec.verify_transition(&new_spec)?;
1031
1032    let reverting = new_spec.boot_order == BootOrder::Default;
1033    if reverting {
1034        println!("notice: Reverting queued rollback state");
1035    }
1036    let rollback_status = host
1037        .status
1038        .rollback
1039        .ok_or_else(|| anyhow!("No rollback available"))?;
1040    let rollback_image = rollback_status
1041        .query_image(repo)?
1042        .ok_or_else(|| anyhow!("Rollback is not container image based"))?;
1043
1044    // Get current booted image for comparison
1045    let current_image = host
1046        .status
1047        .booted
1048        .as_ref()
1049        .and_then(|b| b.query_image(repo).ok()?);
1050
1051    tracing::info!(
1052        message_id = ROLLBACK_JOURNAL_ID,
1053        bootc.manifest_digest = rollback_image.manifest_digest.as_ref(),
1054        bootc.ostree_commit = &rollback_image.merge_commit,
1055        bootc.rollback_type = if reverting { "revert" } else { "rollback" },
1056        bootc.current_manifest_digest = current_image
1057            .as_ref()
1058            .map(|i| i.manifest_digest.as_ref())
1059            .unwrap_or("none"),
1060        "Rolling back to image: {}",
1061        rollback_image.manifest_digest
1062    );
1063    // SAFETY: If there's a rollback status, then there's a deployment
1064    let rollback_deployment = deployments.rollback.expect("rollback deployment");
1065    let new_deployments = if reverting {
1066        [booted_ostree.deployment, rollback_deployment]
1067    } else {
1068        [rollback_deployment, booted_ostree.deployment]
1069    };
1070    let new_deployments = new_deployments
1071        .into_iter()
1072        .chain(deployments.other)
1073        .collect::<Vec<_>>();
1074    tracing::debug!("Writing new deployments: {new_deployments:?}");
1075    booted_ostree
1076        .sysroot
1077        .write_deployments(&new_deployments, gio::Cancellable::NONE)?;
1078    if reverting {
1079        println!("Next boot: current deployment");
1080    } else {
1081        println!("Next boot: rollback deployment");
1082    }
1083
1084    write_reboot_required(rollback_image.manifest_digest.as_ref())?;
1085
1086    sysroot.update_mtime()?;
1087
1088    Ok(())
1089}
1090
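/// Find the most recently modified (by mtime) deployment directory in `deploysdir`
/// and return its name.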
1091fn find_newest_deployment_name(deploysdir: &Dir) -> Result<String> {
1092    let mut dirs = Vec::new();
1093    for ent in deploysdir.entries()? {
1094        let ent = ent?;
1095        if !ent.file_type()?.is_dir() {
1096            continue;
1097        }
1098        let name = ent.file_name();
1099        let Some(name) = name.to_str() else {
1100            continue;
1101        };
1102        dirs.push((name.to_owned(), ent.metadata()?.mtime()));
1103    }
1104    dirs.sort_unstable_by(|a, b| a.1.cmp(&b.1));
1105    if let Some((name, _ts)) = dirs.pop() {
1106        Ok(name)
1107    } else {
1108        anyhow::bail!("No deployment directory found")
1109    }
1110}
1111
1112// Implementation of `bootc switch --in-place`
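// Rewrites the origin file of the newest deployment found under `root` to point at
// `imgref`, returning the name of the deployment that was modified.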
1113pub(crate) fn switch_origin_inplace(root: &Dir, imgref: &ImageReference) -> Result<String> {
1114    // Log the in-place switch operation to systemd journal
1115    const SWITCH_INPLACE_JOURNAL_ID: &str = "3e2f1a0b9c8d7e6f5a4b3c2d1e0f9a8b7";
1116
1117    tracing::info!(
1118        message_id = SWITCH_INPLACE_JOURNAL_ID,
1119        bootc.image.reference = &imgref.image,
1120        bootc.image.transport = &imgref.transport,
1121        bootc.switch_type = "in_place",
1122        "Performing in-place switch to image: {}",
1123        imgref
1124    );
1125
1126    // First, just create the new origin file
1127    let origin = origin_from_imageref(imgref)?;
1128    let serialized_origin = origin.to_data();
1129
1130    // Now, we can't rely on being officially booted (e.g. with the `ostree=` karg)
1131    // in a scenario like running in the anaconda %post.
1132    // Eventually, we should support a setup here where ostree-prepare-root
1133    // can officially be run to "enter" an ostree root in a supportable way.
1134    // Anyways for now, the brutal hack is to just scrape through the deployments
1135    // and find the newest one, which we will mutate.  If there's more than one,
1136    // ultimately the calling tooling should be fixed to set things up correctly.
1137
1138    let mut ostree_deploys = root.open_dir("sysroot/ostree/deploy")?.entries()?;
1139    let deploydir = loop {
1140        if let Some(ent) = ostree_deploys.next() {
1141            let ent = ent?;
1142            if !ent.file_type()?.is_dir() {
1143                continue;
1144            }
1145            tracing::debug!("Checking {:?}", ent.file_name());
1146            let child_dir = ent
1147                .open_dir()
1148                .with_context(|| format!("Opening dir {:?}", ent.file_name()))?;
1149            if let Some(d) = child_dir.open_dir_optional("deploy")? {
1150                break d;
1151            }
1152        } else {
1153            anyhow::bail!("Failed to find a deployment");
1154        }
1155    };
1156    let newest_deployment = find_newest_deployment_name(&deploydir)?;
1157    let origin_path = format!("{newest_deployment}.origin");
1158    if !deploydir.try_exists(&origin_path)? {
1159        tracing::warn!("No extant origin for {newest_deployment}");
1160    }
1161    deploydir
1162        .atomic_write(&origin_path, serialized_origin.as_bytes())
1163        .context("Writing origin")?;
1164    Ok(newest_deployment)
1165}
1166
1167/// A workaround for https://github.com/ostreedev/ostree/issues/3193
1168/// as generated by anaconda.
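/// For example, a root entry like `UUID=... / xfs defaults 0 0` is rewritten to
/// `UUID=... / xfs defaults,ro 0 0`, preceded by a stamp comment (see
/// `test_fixup_etc_fstab_rw` below).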
1169#[context("Updating /etc/fstab for anaconda+composefs")]
1170pub(crate) fn fixup_etc_fstab(root: &Dir) -> Result<()> {
1171    let fstab_path = "etc/fstab";
1172    // Read the old file
1173    let fd = root
1174        .open(fstab_path)
1175        .with_context(|| format!("Opening {fstab_path}"))
1176        .map(std::io::BufReader::new)?;
1177
1178    // Helper function to possibly change a line from /etc/fstab.
1179    // Returns Ok(true) if we made a change (and we wrote the modified line)
1180    // otherwise returns Ok(false) and the caller should write the original line.
1181    fn edit_fstab_line(line: &str, mut w: impl Write) -> Result<bool> {
1182        if line.starts_with('#') {
1183            return Ok(false);
1184        }
1185        let parts = line.split_ascii_whitespace().collect::<Vec<_>>();
1186
1187        let path_idx = 1;
1188        let options_idx = 3;
1189        let (&path, &options) = match (parts.get(path_idx), parts.get(options_idx)) {
1190            (None, _) => {
1191                tracing::debug!("No path in entry: {line}");
1192                return Ok(false);
1193            }
1194            (_, None) => {
1195                tracing::debug!("No options in entry: {line}");
1196                return Ok(false);
1197            }
1198            (Some(p), Some(o)) => (p, o),
1199        };
1200        // If this is not the root, we're not matching on it
1201        if path != "/" {
1202            return Ok(false);
1203        }
1204        // If options already contains `ro`, nothing to do
1205        if options.split(',').any(|s| s == "ro") {
1206            return Ok(false);
1207        }
1208
1209        writeln!(w, "# {}", crate::generator::BOOTC_EDITED_STAMP)?;
1210
1211        // SAFETY: we unpacked the options before.
1212        // This adds `ro` to the option list
1213        assert!(!options.is_empty()); // Split wouldn't have turned this up if it was empty
1214        let options = format!("{options},ro");
1215        for (i, part) in parts.into_iter().enumerate() {
1216            // TODO: would obviously be nicer to preserve whitespace...but...eh.
1217            if i > 0 {
1218                write!(w, " ")?;
1219            }
1220            if i == options_idx {
1221                write!(w, "{options}")?;
1222            } else {
1223                write!(w, "{part}")?
1224            }
1225        }
1226        // And add the trailing newline
1227        writeln!(w)?;
1228        Ok(true)
1229    }
1230
1231    // Read the input, and atomically write a modified version
1232    root.atomic_replace_with(fstab_path, move |mut w| -> Result<()> {
1233        for line in fd.lines() {
1234            let line = line?;
1235            if !edit_fstab_line(&line, &mut w)? {
1236                writeln!(w, "{line}")?;
1237            }
1238        }
1239        Ok(())
1240    })
1241    .context("Replacing /etc/fstab")?;
1242
1243    println!("Updated /etc/fstab to add `ro` for `/`");
1244    Ok(())
1245}
1246
1247#[cfg(test)]
1248mod tests {
1249    use super::*;
1250
1251    #[test]
1252    fn test_switch_inplace() -> Result<()> {
1253        use cap_std::fs::DirBuilderExt;
1254
1255        let td = cap_std_ext::cap_tempfile::TempDir::new(cap_std::ambient_authority())?;
1256        let mut builder = cap_std::fs::DirBuilder::new();
1257        let builder = builder.recursive(true).mode(0o755);
1258        let deploydir = "sysroot/ostree/deploy/default/deploy";
1259        let target_deployment =
1260            "af36eb0086bb55ac601600478c6168f834288013d60f8870b7851f44bf86c3c5.0";
1261        td.ensure_dir_with(
1262            format!("sysroot/ostree/deploy/default/deploy/{target_deployment}"),
1263            builder,
1264        )?;
1265        let deploydir = &td.open_dir(deploydir)?;
1266        let orig_imgref = ImageReference {
1267            image: "quay.io/exampleos/original:sometag".into(),
1268            transport: "registry".into(),
1269            signature: None,
1270        };
1271        {
1272            let origin = origin_from_imageref(&orig_imgref)?;
1273            deploydir.atomic_write(
1274                format!("{target_deployment}.origin"),
1275                origin.to_data().as_bytes(),
1276            )?;
1277        }
1278
1279        let target_imgref = ImageReference {
1280            image: "quay.io/someother/otherimage:latest".into(),
1281            transport: "registry".into(),
1282            signature: None,
1283        };
1284
1285        let replaced = switch_origin_inplace(&td, &target_imgref).unwrap();
1286        assert_eq!(replaced, target_deployment);
1287        Ok(())
1288    }
1289
1290    #[test]
1291    fn test_fixup_etc_fstab_default() -> Result<()> {
1292        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
1293        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n";
1294        tempdir.create_dir_all("etc")?;
1295        tempdir.atomic_write("etc/fstab", default)?;
1296        fixup_etc_fstab(&tempdir).unwrap();
1297        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
1298        Ok(())
1299    }
1300
1301    #[test]
1302    fn test_fixup_etc_fstab_multi() -> Result<()> {
1303        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
1304        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
1305UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
1306        tempdir.create_dir_all("etc")?;
1307        tempdir.atomic_write("etc/fstab", default)?;
1308        fixup_etc_fstab(&tempdir).unwrap();
1309        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
1310        Ok(())
1311    }
1312
1313    #[test]
1314    fn test_fixup_etc_fstab_ro() -> Result<()> {
1315        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
1316        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
1317UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   ro 0 0\n\
1318UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
1319        tempdir.create_dir_all("etc")?;
1320        tempdir.atomic_write("etc/fstab", default)?;
1321        fixup_etc_fstab(&tempdir).unwrap();
1322        assert_eq!(tempdir.read_to_string("etc/fstab")?, default);
1323        Ok(())
1324    }
1325
1326    #[test]
1327    fn test_fixup_etc_fstab_rw() -> Result<()> {
1328        let tempdir = cap_std_ext::cap_tempfile::tempdir(cap_std::ambient_authority())?;
1329        // This case uses `defaults`
1330        let default = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
1331UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 /                     xfs   defaults 0 0\n\
1332UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
1333        let modified = "UUID=f7436547-20ac-43cb-aa2f-eac9632183f6 /boot auto ro 0 0\n\
1334# Updated by bootc-fstab-edit.service\n\
1335UUID=1eef9f42-40e3-4bd8-ae20-e9f2325f8b52 / xfs defaults,ro 0 0\n\
1336UUID=6907-17CA          /boot/efi               vfat    umask=0077,shortname=winnt 0 2\n";
1337        tempdir.create_dir_all("etc")?;
1338        tempdir.atomic_write("etc/fstab", default)?;
1339        fixup_etc_fstab(&tempdir).unwrap();
1340        assert_eq!(tempdir.read_to_string("etc/fstab")?, modified);
1341        Ok(())
1342    }
1343}