rakata_formats/mdl/
writer.rs

1//! MDL binary writer.
2//!
3//! Serializes an [`Mdl`] struct back into the binary MDL+MDX format used by
4//! the Odyssey engine. The writer uses a single-pass cursor approach: it
5//! reserves placeholder space for headers and arrays, writes child data
6//! forward, then backpatches pointers and counts using seek-back helpers.
7//!
8//! MDX vertex data is written separately via [`write_mdl_with_mdx_to_vec`] or
9//! deferred to a second buffer. Non-skin meshes are emitted first (DFS order),
10//! then skin meshes (DFS order), matching the vanilla engine layout.
11
12use std::collections::HashMap;
13use std::io::{Cursor, Write};
14
15use crate::binary::{write_f32, write_i32, write_u16, write_u32};
16
17use super::controllers::MdlController;
18use super::types::MdlNodeData;
19use super::{
20    aabb_offsets, anim_mesh_offsets, dangly_offsets, header_offsets, light_offsets, mesh_offsets,
21    node_offsets, saber_offsets, skin_offsets, Mdl, MdlError, MdlNode, AABB_EXTRA_SIZE,
22    ANIM_MESH_EXTRA_SIZE, DANGLY_EXTRA_SIZE, LIGHT_EXTRA_SIZE, MDL_WRAPPER_SIZE, MESH_EXTRA_SIZE,
23    NODE_HEADER_SIZE, SABER_EXTRA_SIZE, SKIN_EXTRA_SIZE,
24};
25
/// Mesh reference collected during DFS traversal for deferred MDX writing.
///
/// One entry is gathered per mesh-bearing, non-saber node (see
/// `collect_mesh_refs`); `write_deferred_mdx` pairs these 1:1 with
/// [`DeferredMdxEntry`] records to flush vertex data after all headers.
struct MeshRef<'a> {
    /// The base mesh data.
    mesh: &'a super::MdlMesh,
    /// Skin wrapper (if this is a skin node), carrying bone weight/index data.
    skin: Option<&'a super::MdlSkin>,
    /// Whether this node is a skin mesh (skins are flushed after non-skins).
    is_skin: bool,
}
35
36/// Writes an MDL file structure to a writer.
37#[cfg_attr(
38    feature = "tracing",
39    tracing::instrument(level = "debug", skip(writer, mdl))
40)]
41pub fn write_mdl<W: Write>(writer: &mut W, mdl: &Mdl) -> Result<(), MdlError> {
42    let bytes = write_mdl_to_vec(mdl)?;
43    crate::trace_debug!(bytes_len = bytes.len(), "wrote mdl to writer");
44    writer.write_all(&bytes)?;
45    Ok(())
46}
47
48/// Writes an MDL file structure to a byte vector.
49#[cfg_attr(feature = "tracing", tracing::instrument(level = "debug", skip(mdl)))]
50pub fn write_mdl_to_vec(mdl: &Mdl) -> Result<Vec<u8>, MdlError> {
51    let writer = MdlWriter::new();
52    let (bytes, _) = writer.write(mdl)?;
53    crate::trace_debug!(bytes_len = bytes.len(), "serialized mdl to vec");
54    Ok(bytes)
55}
56
57/// Writes an MDL model and its companion MDX vertex data to byte vectors.
58///
59/// Unlike [`write_mdl_to_vec`], this function computes the MDX vertex buffer
60/// layout (stride, flags, per-attribute offsets) from the mesh vertex data and
61/// writes interleaved vertex attributes into a separate MDX buffer.
62#[cfg_attr(feature = "tracing", tracing::instrument(level = "debug", skip(mdl)))]
63pub fn write_mdl_with_mdx_to_vec(mdl: &Mdl) -> Result<super::MdlWriteResult, MdlError> {
64    let writer = MdlWriter::new_with_mdx();
65    let (mdl_bytes, mdx_bytes) = writer.write(mdl)?;
66    crate::trace_debug!(
67        mdl_bytes_len = mdl_bytes.len(),
68        mdx_bytes_len = mdx_bytes.len(),
69        "serialized mdl+mdx to vec"
70    );
71    Ok(super::MdlWriteResult {
72        mdl_bytes,
73        mdx_bytes,
74    })
75}
76
77/// Checked narrowing cast from `usize` to `u32`.
78fn count_u32(len: usize, field: &'static str) -> Result<u32, MdlError> {
79    u32::try_from(len).map_err(|_| MdlError::ValueOverflow(field))
80}
81
82/// Checked narrowing cast from `usize` to `u16`.
83fn count_u16(len: usize, field: &'static str) -> Result<u16, MdlError> {
84    u16::try_from(len).map_err(|_| MdlError::ValueOverflow(field))
85}
86
/// Entry for deferred MDX writing. Collected during the node DFS traversal,
/// then flushed in non-skin-first/skin-second order to match the BioWare
/// engine's canonical MDX layout.
///
/// `dfs_index` pairs each entry with the corresponding `MeshRef` that
/// `write_deferred_mdx` rebuilds by re-walking the tree in the same order.
struct DeferredMdxEntry {
    /// Position of the mesh extra header in the MDL buffer (for backpatching).
    mesh_header_pos: u64,
    /// Position of the skin extra header (for backpatching bone offsets), if skin.
    skin_header_pos: Option<u64>,
    /// Whether this mesh belongs to a Skin node.
    is_skin: bool,
    /// DFS encounter index (for stable sort within each group).
    dfs_index: usize,
}
100
/// Single-pass writer state for MDL (and optionally MDX) serialization.
struct MdlWriter {
    /// In-memory MDL output (wrapper + content), seekable for backpatching.
    buffer: Cursor<Vec<u8>>,
    /// In-memory MDX output buffer; only filled when `write_mdx` is set.
    mdx_buffer: Cursor<Vec<u8>>,
    /// Whether MDX vertex data should be emitted alongside the MDL.
    write_mdx: bool,
    /// Unique node names in first-encounter (DFS) order; becomes the name table.
    names: Vec<String>,
    /// Name -> index into `names`, for name-table index lookups.
    name_map: HashMap<String, u16>,
    /// Content-relative byte offset of each name string, one entry per entry
    /// in `names` (indexed by name-table index).
    name_string_offsets: Vec<u32>,
    /// MDX position right after the last terminator, before alignment padding.
    /// Used to trim trailing alignment from the final mesh.
    mdx_pre_align_pos: u64,
    /// Deferred MDX write entries, collected during DFS node traversal.
    deferred_mdx: Vec<DeferredMdxEntry>,
    /// Map of geometry node name -> written content-relative offset.
    /// Used to resolve `anim_root_node` during header backpatch.
    node_offset_map: HashMap<String, u32>,
}
118
119impl MdlWriter {
120    fn new() -> Self {
121        MdlWriter {
122            buffer: Cursor::new(Vec::new()),
123            mdx_buffer: Cursor::new(Vec::new()),
124            write_mdx: false,
125            names: Vec::new(),
126            name_map: HashMap::new(),
127            name_string_offsets: Vec::new(),
128            mdx_pre_align_pos: 0,
129            deferred_mdx: Vec::new(),
130            node_offset_map: HashMap::new(),
131        }
132    }
133
134    fn new_with_mdx() -> Self {
135        MdlWriter {
136            buffer: Cursor::new(Vec::new()),
137            mdx_buffer: Cursor::new(Vec::new()),
138            write_mdx: true,
139            names: Vec::new(),
140            name_map: HashMap::new(),
141            name_string_offsets: Vec::new(),
142            mdx_pre_align_pos: 0,
143            deferred_mdx: Vec::new(),
144            node_offset_map: HashMap::new(),
145        }
146    }
147
148    /// Returns current buffer position as content-relative u32 offset.
149    /// Content byte 0 = on-disk byte 12 (after the 12-byte MDL wrapper).
150    fn content_position(&self) -> Result<u32, MdlError> {
151        let pos = self.buffer.position();
152        let content = pos
153            .checked_sub(MDL_WRAPPER_SIZE)
154            .ok_or(MdlError::ValueOverflow("content_position underflow"))?;
155        u32::try_from(content).map_err(|_| MdlError::ValueOverflow("content_position"))
156    }
157
158    /// Returns current MDX buffer position as u32.
159    fn mdx_position(&self) -> Result<u32, MdlError> {
160        u32::try_from(self.mdx_buffer.position())
161            .map_err(|_| MdlError::ValueOverflow("mdx_position"))
162    }
163
164    /// Seeks the MDL buffer to `base + offset`.
165    fn seek_to(&mut self, base: u64, offset: usize) {
166        self.buffer
167            .set_position(base + u64::try_from(offset).expect("offset fits in u64"));
168    }
169
    /// Serializes `mdl` into `(mdl_bytes, mdx_bytes)` and consumes the writer.
    ///
    /// Layout order is significant: 12-byte wrapper placeholder, 196-byte
    /// header placeholder, name table, geometry nodes (DFS), deferred MDX
    /// vertex data, animations, then header and wrapper backpatching.
    fn write(mut self, mdl: &Mdl) -> Result<(Vec<u8>, Vec<u8>), MdlError> {
        // 1. Gather Names via DFS tree walk (geometry + animation nodes).
        self.collect_names(&mdl.root_node);
        for anim in &mdl.animations {
            self.collect_anim_names(&anim.root_node);
        }

        // 2. Write Wrapper placeholder (12 bytes, backpatched at end).
        #[allow(clippy::as_conversions)]
        const WRAPPER_BYTES: usize = MDL_WRAPPER_SIZE as usize;
        // Static assert: MDL_WRAPPER_SIZE fits in usize without truncation.
        #[allow(clippy::as_conversions)]
        const _: () = assert!(MDL_WRAPPER_SIZE == WRAPPER_BYTES as u64);
        self.buffer.write_all(&[0u8; WRAPPER_BYTES])?;

        // 3. Reserve Header (196 bytes: 80-byte geometry + 116-byte model).
        let header_start = self.buffer.position();
        self.buffer.write_all(&[0u8; 196])?;

        // 4. Write Name Table.
        let name_offsets_ptr = self.content_position()?;
        let name_count = count_u32(self.names.len(), "name_count")?;

        // Reserve name offsets array.
        let name_offsets_start = self.buffer.position();
        for _ in 0..name_count {
            write_u32(&mut self.buffer, 0)?;
        }

        // Write strings and record offsets. The offsets are also kept in
        // `name_string_offsets` for later use by node writers.
        self.name_string_offsets = Vec::with_capacity(self.names.len());
        let mut current_offset_pos = name_offsets_start;
        for name in &self.names {
            let str_start = self.content_position()?;
            self.name_string_offsets.push(str_start);

            // Go back and write the offset for this string.
            let save_pos = self.buffer.position();
            self.buffer.set_position(current_offset_pos);
            write_u32(&mut self.buffer, str_start)?;
            self.buffer.set_position(save_pos);
            current_offset_pos += 4;

            // Write string + null terminator.
            self.buffer.write_all(name.as_bytes())?;
            self.buffer.write_all(&[0u8])?;
        }

        // 5. Write Nodes Recursively
        let root_node_offset = self.write_node(&mdl.root_node, None)?;

        // 5b. Flush deferred MDX writes in canonical order:
        // non-skin meshes first (DFS order), then skin meshes (DFS order).
        // See mdl_mdx.md §MDX Per-Mesh Terminator Rows (Finding 4).
        if self.write_mdx && !self.deferred_mdx.is_empty() {
            self.write_deferred_mdx(&mdl.root_node)?;
        }

        // 5c. Write Animations (after geometry nodes, before header backpatch).
        let (anim_arr_ptr, anim_arr_count) = self.write_animations(&mdl.animations)?;

        // 6. Backpatch Header - write all fields explicitly.

        // --- Geometry header (+0x00..+0x4F) ---
        self.seek_to(header_start, header_offsets::FN_PTR1);
        write_u32(&mut self.buffer, mdl.geometry_fn_ptr1)?;

        self.seek_to(header_start, header_offsets::FN_PTR2);
        write_u32(&mut self.buffer, mdl.geometry_fn_ptr2)?;

        // Model name at +0x08 (32-byte null-terminated, same as root node name).
        // The field was zero-filled when the header was reserved, so only the
        // name bytes and one terminator need writing; length is clamped so a
        // terminator always fits.
        self.seek_to(header_start, header_offsets::MODEL_NAME);
        let model_name = mdl.root_node.name.as_bytes();
        let model_name_len = model_name.len().min(header_offsets::MODEL_NAME_SIZE - 1);
        self.buffer.write_all(&model_name[..model_name_len])?;
        self.buffer.write_all(&[0])?;

        self.seek_to(header_start, header_offsets::ROOT_NODE_PTR);
        write_u32(&mut self.buffer, root_node_offset)?;

        self.seek_to(header_start, header_offsets::NODE_COUNT);
        write_u32(&mut self.buffer, mdl.node_count)?;

        // Runtime arrays at +0x30..+0x47 stay zero (written at init).
        // ref_count at +0x48 stays zero.

        // Model type at +0x4C (always 2 for geometry models).
        self.seek_to(header_start, header_offsets::MODEL_TYPE);
        self.buffer.write_all(&[mdl.model_type])?;

        // --- Model header (+0x50..+0xC3) ---
        self.seek_to(header_start, header_offsets::CLASSIFICATION);
        self.buffer.write_all(&[mdl.classification])?;

        self.seek_to(header_start, header_offsets::SUBCLASSIFICATION);
        self.buffer.write_all(&[mdl.subclassification])?;

        // +0x52: unknown byte, stays zero.

        self.seek_to(header_start, header_offsets::AFFECTED_BY_FOG);
        self.buffer.write_all(&[mdl.affected_by_fog])?;

        // num_child_models at +0x54: always 0 (stays zero from init).

        // Animation array CExoArrayList at +0x58 (ptr/count/alloc).
        self.seek_to(header_start, header_offsets::ANIMATION_ARR_PTR);
        write_u32(&mut self.buffer, anim_arr_ptr)?;
        write_u32(&mut self.buffer, anim_arr_count)?;
        write_u32(&mut self.buffer, anim_arr_count)?; // alloc mirrors count

        // supermodel_ref at +0x64: always 0 (stays zero from init).

        // Bounding box at +0x68 (6 floats).
        self.seek_to(header_start, header_offsets::BOUNDING_BOX_MIN);
        for &val in &mdl.bounding_box {
            write_f32(&mut self.buffer, val)?;
        }

        // Radius at +0x80.
        self.seek_to(header_start, header_offsets::RADIUS);
        write_f32(&mut self.buffer, mdl.radius)?;

        // Animation scale at +0x84.
        self.seek_to(header_start, header_offsets::ANIMATION_SCALE);
        write_f32(&mut self.buffer, mdl.animation_scale)?;

        // Supermodel name at +0x88 (null-terminated, 32-byte field).
        self.seek_to(header_start, header_offsets::SUPERMODEL_NAME);
        let name_bytes = mdl.supermodel_name.as_bytes();
        let write_len = name_bytes
            .len()
            .min(header_offsets::SUPERMODEL_NAME_SIZE - 1);
        self.buffer.write_all(&name_bytes[..write_len])?;
        self.buffer.write_all(&[0])?;

        // off_anim_root at +0xA8: points to the animation root node.
        // For most models this is the geometry root. Head models point to
        // `neck_g` so the engine applies head animations from the neck bone.
        // Falls back to the geometry root when the named node was not written.
        let anim_root_offset = mdl
            .anim_root_node
            .as_ref()
            .and_then(|name| self.node_offset_map.get(name).copied())
            .unwrap_or(root_node_offset);
        self.seek_to(header_start, header_offsets::OFF_ANIM_ROOT);
        write_u32(&mut self.buffer, anim_root_offset)?;

        // +0xAC: padding, stays zero.

        // Name array at +0xB8.
        self.seek_to(header_start, header_offsets::NAME_OFFSETS_PTR);
        write_u32(&mut self.buffer, name_offsets_ptr)?;

        self.seek_to(header_start, header_offsets::NAME_COUNT);
        write_u32(&mut self.buffer, name_count)?;

        // From here on, operate on the raw byte vectors (partial moves out of
        // `self`; remaining fields like `mdx_pre_align_pos` stay accessible).
        let mut mdl_bytes = self.buffer.into_inner();
        let mut mdx_bytes = self.mdx_buffer.into_inner();

        // Trim trailing 16-byte alignment padding from the last mesh's
        // terminator. The MDX file ends right after the last terminator,
        // without alignment padding. The writer tracks the pre-alignment
        // position so we can truncate precisely. Must happen BEFORE the MDX
        // size is computed below.
        // See mdl_mdx.md §MDX Per-Mesh Terminator Rows (Finding 3).
        let trim_to =
            usize::try_from(self.mdx_pre_align_pos).expect("mdx_pre_align_pos fits in usize");
        if trim_to > 0 && trim_to < mdx_bytes.len() {
            mdx_bytes.truncate(trim_to);
        }

        // Backpatch wrapper sizes:
        // +0x00 zero marker (always 0), +0x04 MDL content size, +0x08 MDX file size.
        let mdl_content_size = mdl_bytes
            .len()
            .checked_sub(WRAPPER_BYTES)
            .ok_or_else(|| MdlError::InvalidData("serialized MDL shorter than wrapper".into()))?;
        let mdl_content_size_u32 = u32::try_from(mdl_content_size)
            .map_err(|_| MdlError::ValueOverflow("mdl_content_size"))?;
        let mdx_size_u32 =
            u32::try_from(mdx_bytes.len()).map_err(|_| MdlError::ValueOverflow("mdx_file_size"))?;
        mdl_bytes[0..4].copy_from_slice(&0u32.to_le_bytes());
        mdl_bytes[4..8].copy_from_slice(&mdl_content_size_u32.to_le_bytes());
        mdl_bytes[8..12].copy_from_slice(&mdx_size_u32.to_le_bytes());

        // Backpatch MDX size in model header (+0xB0).
        let mdx_size_offset = WRAPPER_BYTES + header_offsets::MDX_SIZE;
        mdl_bytes[mdx_size_offset..mdx_size_offset + 4]
            .copy_from_slice(&mdx_size_u32.to_le_bytes());

        Ok((mdl_bytes, mdx_bytes))
    }
360
361    /// Flush deferred MDX writes in canonical order: non-skin meshes first
362    /// (DFS order), then skin meshes (DFS order). Each mesh gets its vertex
363    /// data written followed by a terminator row with 16-byte alignment
364    /// (except the very last mesh, whose trailing alignment is trimmed by
365    /// the caller).
366    ///
367    /// The DFS-order mesh references are obtained by walking the node tree
368    /// in the same pre-order traversal used by `write_node`.
369    fn write_deferred_mdx(&mut self, root: &MdlNode) -> Result<(), MdlError> {
370        // Collect mesh node references in DFS order.
371        let mut mesh_refs: Vec<MeshRef<'_>> = Vec::new();
372        Self::collect_mesh_refs(root, &mut mesh_refs);
373
374        // Sanity check: deferred entries must match mesh refs 1:1.
375        debug_assert_eq!(mesh_refs.len(), self.deferred_mdx.len());
376
377        // Build ordered indices: non-skin first, then skin (both in DFS order).
378        // DFS order is preserved because `deferred_mdx` was populated in DFS
379        // order and we use a stable partition.
380        let entries: Vec<DeferredMdxEntry> = std::mem::take(&mut self.deferred_mdx);
381        let (non_skin, skin): (Vec<usize>, Vec<usize>) =
382            (0..entries.len()).partition(|&i| !entries[i].is_skin);
383        let ordered: Vec<usize> = non_skin.into_iter().chain(skin).collect();
384
385        for idx in &ordered {
386            let entry = &entries[*idx];
387            let mesh_ref = &mesh_refs[entry.dfs_index];
388
389            // Record MDX file offset BEFORE writing vertex data -- this is
390            // where the engine will find this mesh's vertices in the MDX buffer.
391            let mdx_offset = self.mdx_position()?;
392
393            let stride = self.write_mesh_mdx_data(
394                mesh_ref.mesh,
395                mesh_ref.skin,
396                entry.mesh_header_pos,
397                entry.skin_header_pos,
398            )?;
399            self.write_mdx_terminator(stride, mesh_ref.is_skin)?;
400
401            // Backpatch mesh header +0x144 with the MDX file offset.
402            // Community tools (mdlops, kotorblender) write this value and the
403            // engine uses it to locate per-mesh vertex data within the bulk
404            // MDX GL vertex buffer.
405            let save = self.buffer.position();
406            self.seek_to(entry.mesh_header_pos, mesh_offsets::MDX_DATA_OFFSET);
407            write_u32(&mut self.buffer, mdx_offset)?;
408            self.buffer.set_position(save);
409        }
410
411        Ok(())
412    }
413
414    /// Collect mesh references from the node tree in pre-order DFS, matching
415    /// the traversal order of `write_node`. Returns mesh, optional skin, and
416    /// whether the node is a skin.
417    ///
418    /// Saber nodes are excluded because they have no MDX vertex data (stride=0).
419    fn collect_mesh_refs<'a>(node: &'a MdlNode, out: &mut Vec<MeshRef<'a>>) {
420        if !node.is_saber() {
421            if let Some(mesh) = node.node_data.mesh() {
422                let skin = if let MdlNodeData::Skin(s) = &node.node_data {
423                    Some(s)
424                } else {
425                    None
426                };
427                out.push(MeshRef {
428                    mesh,
429                    skin,
430                    is_skin: node.is_skin(),
431                });
432            }
433        }
434        for child in &node.children {
435            Self::collect_mesh_refs(child, out);
436        }
437    }
438
439    fn collect_names(&mut self, node: &MdlNode) {
440        if !self.name_map.contains_key(&node.name) {
441            let index = count_u16(self.names.len(), "name_index")
442                .expect("name table must not exceed u16::MAX entries during collect_names");
443            // Both the name table (Vec) and the lookup map (HashMap) need owned
444            // copies of the name. The double clone is unavoidable without an
445            // index-based refactor; model name tables are small (~100 entries).
446            self.name_map.insert(node.name.clone(), index);
447            self.names.push(node.name.clone());
448        }
449        for child in &node.children {
450            self.collect_names(child);
451        }
452    }
453
454    fn collect_anim_names(&mut self, node: &super::MdlAnimNode) {
455        if !self.name_map.contains_key(&node.name) {
456            let index = count_u16(self.names.len(), "name_index")
457                .expect("name table must not exceed u16::MAX entries during collect_anim_names");
458            self.name_map.insert(node.name.clone(), index);
459            self.names.push(node.name.clone());
460        }
461        for child in &node.children {
462            self.collect_anim_names(child);
463        }
464    }
465
466    // -----------------------------------------------------------------------
467    // Animation writer
468    // -----------------------------------------------------------------------
469
470    /// Writes all animations and returns `(anim_arr_ptr, anim_count)` for
471    /// backpatching into the model header's CExoArrayList at +0x58.
472    fn write_animations(
473        &mut self,
474        animations: &[super::MdlAnimation],
475    ) -> Result<(u32, u32), MdlError> {
476        if animations.is_empty() {
477            return Ok((0, 0));
478        }
479
480        // Write the offset array (one u32 per animation, content-relative).
481        let arr_ptr = self.content_position()?;
482        let arr_count = count_u32(animations.len(), "animation_count")?;
483
484        // Reserve the offset array, backpatch after writing each animation.
485        let arr_start = self.buffer.position();
486        for _ in 0..arr_count {
487            write_u32(&mut self.buffer, 0)?;
488        }
489
490        let mut anim_offsets = Vec::with_capacity(animations.len());
491        for anim in animations {
492            let offset = self.write_animation(anim)?;
493            anim_offsets.push(offset);
494        }
495
496        // Backpatch the offset array.
497        let save_pos = self.buffer.position();
498        self.buffer.set_position(arr_start);
499        for &offset in &anim_offsets {
500            write_u32(&mut self.buffer, offset)?;
501        }
502        self.buffer.set_position(save_pos);
503
504        Ok((arr_ptr, arr_count))
505    }
506
    /// Writes a single animation: header (136 bytes) + events + node tree.
    /// Returns the content-relative offset of the animation header.
    ///
    /// The header is reserved first and backpatched once the event list and
    /// node tree have been written, so all pointers/counts are known.
    fn write_animation(&mut self, anim: &super::MdlAnimation) -> Result<u32, MdlError> {
        let anim_offset = self.content_position()?;

        // Reserve 136-byte animation header (backpatched below).
        let header_pos = self.buffer.position();
        self.buffer
            .write_all(&[0u8; super::ANIMATION_HEADER_SIZE])?;

        // Write events. Each event record is a 4-byte f32 time followed by a
        // 32-byte fixed-length name. A model with no events stores a 0 pointer.
        let events_ptr = if anim.events.is_empty() {
            0u32
        } else {
            let ptr = self.content_position()?;
            for event in &anim.events {
                write_f32(&mut self.buffer, event.time)?;
                self.write_fixed_string(&event.name, 32)?;
            }
            ptr
        };
        let event_count = count_u32(anim.events.len(), "event_count")?;

        // Write animation node tree recursively.
        let root_node_offset = self.write_anim_node(&anim.root_node, anim_offset, None)?;

        // Count total animation nodes.
        let total_nodes = super::count_anim_nodes(&anim.root_node);

        // Backpatch animation header. The fields below are written
        // sequentially from the header start, so each write advances the
        // cursor to the next field's offset.
        let save_pos = self.buffer.position();
        self.buffer.set_position(header_pos);

        // fn_ptr1, fn_ptr2 at +0x00, +0x04
        write_u32(&mut self.buffer, anim.fn_ptr1)?;
        write_u32(&mut self.buffer, anim.fn_ptr2)?;

        // name at +0x08 (32 bytes)
        self.write_fixed_string(&anim.name, super::anim_header_offsets::NAME_SIZE)?;

        // root_node_ptr at +0x28
        write_u32(&mut self.buffer, root_node_offset)?;

        // total_num_nodes at +0x2C
        write_u32(&mut self.buffer, total_nodes)?;

        // runtime_arr1 at +0x30 (12 bytes zeros) - already zero from init,
        // but writing explicitly advances the cursor past the field.
        self.buffer.write_all(&[0u8; 12])?;
        // runtime_arr2 at +0x3C (12 bytes zeros) - already zero from init
        self.buffer.write_all(&[0u8; 12])?;
        // ref_count at +0x48
        write_u32(&mut self.buffer, 0)?;

        // model_type at +0x4C (u8 = 5 for animations, + 3 padding)
        self.buffer.write_all(&[5, 0, 0, 0])?;

        // length at +0x50
        write_f32(&mut self.buffer, anim.length)?;

        // transition at +0x54
        write_f32(&mut self.buffer, anim.transition_time)?;

        // anim_root at +0x58 (32 bytes)
        self.write_fixed_string(&anim.anim_root, super::anim_header_offsets::ANIM_ROOT_SIZE)?;

        // event_arr at +0x78 (ptr/count/alloc; alloc mirrors count)
        write_u32(&mut self.buffer, events_ptr)?;
        write_u32(&mut self.buffer, event_count)?;
        write_u32(&mut self.buffer, event_count)?;

        // padding at +0x84 (4 bytes, stays zero from init)
        write_u32(&mut self.buffer, 0)?;

        self.buffer.set_position(save_pos);
        Ok(anim_offset)
    }
583
    /// Writes a single animation node (80-byte base header) and its children.
    /// Returns the content-relative offset.
    ///
    /// * `anim_header_offset` - content-relative offset of the owning
    ///   animation header; stored as the node's root pointer (+0x08).
    /// * `parent_node_offset` - content-relative offset of the parent node,
    ///   or `None` for the animation root (written as 0 at +0x0C).
    fn write_anim_node(
        &mut self,
        node: &super::MdlAnimNode,
        anim_header_offset: u32,
        parent_node_offset: Option<u32>,
    ) -> Result<u32, MdlError> {
        let node_offset = self.content_position()?;

        // Reserve 80-byte node header (backpatched below once child and
        // controller arrays have been written).
        let header_pos = self.buffer.position();
        self.buffer.write_all(&[0u8; NODE_HEADER_SIZE])?;

        // Write children array.
        let child_arr_ptr = if node.children.is_empty() {
            0u32
        } else {
            let ptr = self.content_position()?;
            // Reserve child offset array.
            let children_start = self.buffer.position();
            for _ in 0..node.children.len() {
                write_u32(&mut self.buffer, 0)?;
            }

            // Write each child (recursively) and collect offsets.
            let mut child_offsets = Vec::with_capacity(node.children.len());
            for child in &node.children {
                let child_off =
                    self.write_anim_node(child, anim_header_offset, Some(node_offset))?;
                child_offsets.push(child_off);
            }

            // Backpatch child offsets.
            let save = self.buffer.position();
            self.buffer.set_position(children_start);
            for &off in &child_offsets {
                write_u32(&mut self.buffer, off)?;
            }
            self.buffer.set_position(save);

            ptr
        };
        let child_count = count_u32(node.children.len(), "anim_child_count")?;

        // Write controllers.
        let (ctrl_key_ptr, ctrl_key_count, ctrl_data_ptr, ctrl_data_count) =
            self.write_controllers(&node.controllers, &node.orphan_controller_data)?;

        // Backpatch node header. Fields are written sequentially from the
        // header start, so each write advances to the next field's offset.
        let save_pos = self.buffer.position();
        self.buffer.set_position(header_pos);

        // +0x00: type_flags (u16) -- animation nodes are always NODE_BASE (0x0001)
        write_u16(
            &mut self.buffer,
            u16::try_from(super::node_flags::HEADER)
                .map_err(|_| MdlError::ValueOverflow("anim_node_flags"))?,
        )?;

        // +0x02: node_number (u16) -- maps to the geometry node index
        write_u16(&mut self.buffer, node.node_number)?;

        // +0x04: name_index (u16) -- index into model name table; falls back
        // to 0 if the name was somehow not collected.
        let name_index = *self.name_map.get(&node.name).unwrap_or(&0);
        write_u16(&mut self.buffer, name_index)?;

        // +0x06: padding (2 bytes)
        self.buffer.write_all(&[0u8; 2])?;

        // +0x08: root pointer (content-relative offset to animation header)
        write_u32(&mut self.buffer, anim_header_offset)?;

        // +0x0C: parent pointer (0 for the animation root)
        write_u32(&mut self.buffer, parent_node_offset.unwrap_or(0))?;

        // +0x10: position (3 × f32) - animation nodes use identity transform
        write_f32(&mut self.buffer, 0.0)?;
        write_f32(&mut self.buffer, 0.0)?;
        write_f32(&mut self.buffer, 0.0)?;

        // +0x1C: orientation quaternion (w, x, y, z) - identity rotation
        write_f32(&mut self.buffer, 1.0)?;
        write_f32(&mut self.buffer, 0.0)?;
        write_f32(&mut self.buffer, 0.0)?;
        write_f32(&mut self.buffer, 0.0)?;

        // +0x2C: children array (ptr/count/alloc; alloc mirrors count)
        write_u32(&mut self.buffer, child_arr_ptr)?;
        write_u32(&mut self.buffer, child_count)?;
        write_u32(&mut self.buffer, child_count)?;

        // +0x38: controller key array (ptr/count/alloc)
        write_u32(&mut self.buffer, ctrl_key_ptr)?;
        write_u32(&mut self.buffer, ctrl_key_count)?;
        write_u32(&mut self.buffer, ctrl_key_count)?;

        // +0x44: controller data array (ptr/count/alloc)
        write_u32(&mut self.buffer, ctrl_data_ptr)?;
        write_u32(&mut self.buffer, ctrl_data_count)?;
        write_u32(&mut self.buffer, ctrl_data_count)?;

        self.buffer.set_position(save_pos);
        Ok(node_offset)
    }
689
690    // -----------------------------------------------------------------------
691    // Geometry node writer
692    // -----------------------------------------------------------------------
693
    /// Writes one node (header, type-specific extra headers, array payloads,
    /// children, and controllers) and returns its content-relative offset.
    ///
    /// The 0x44-byte node header is reserved up front and backpatched at the
    /// end, once the child pointer table and controller key/data arrays have
    /// been written and their offsets/counts are known.
    ///
    /// `parent_node_offset` is the content-relative offset of the parent's
    /// node header; `None` (the root) is written as 0.
    fn write_node(
        &mut self,
        node: &MdlNode,
        parent_node_offset: Option<u32>,
    ) -> Result<u32, MdlError> {
        // Current position relative to wrapper end (byte 12 is offset 0)
        let node_start_offset = self.content_position()?;

        // Record this node's written offset for anim_root_node resolution.
        self.node_offset_map
            .insert(node.name.clone(), node_start_offset);

        // Reserve Node Header (0x44 = 68 bytes)
        let header_pos = self.buffer.position();
        self.buffer.write_all(&[0u8; NODE_HEADER_SIZE])?;

        // Write type-specific headers based on node_data variant.
        // Non-mesh types with stub headers come first (matching binary packing order),
        // then mesh header if this is a mesh-derived variant.
        let light_header_pos = match &node.node_data {
            MdlNodeData::Light(light) => Some(self.write_light_header(light)?),
            _ => None,
        };
        match &node.node_data {
            MdlNodeData::Light(_) => {} // header already written above
            MdlNodeData::Emitter(emitter) => {
                self.write_emitter_header(emitter)?;
            }
            MdlNodeData::Camera(_) => {
                // Camera: 0 extra bytes (Ghidra-verified, mdl_mdx.md Non-Mesh Node Type Structs)
            }
            MdlNodeData::Reference(reference) => {
                self.write_reference_header(reference)?;
            }
            _ => {}
        }

        // Phase 1: Write all headers contiguously (mesh + subtype).
        // The binary format requires headers to be contiguous, with array
        // data following separately (pointed to by CExoArrayList pointers).
        let mesh_header_pos = if let Some(mesh) = node.node_data.mesh() {
            Some(self.write_mesh_header(mesh)?)
        } else {
            None
        };

        // Write mesh subtype extra headers (immediately after TriMesh header).
        // At most one of these is Some: node_data is a single enum variant.
        let skin_header_pos = if let MdlNodeData::Skin(skin) = &node.node_data {
            Some(self.write_skin_header(skin)?)
        } else {
            None
        };
        let anim_header_pos = if let MdlNodeData::AnimMesh(anim) = &node.node_data {
            Some(self.write_anim_mesh_header(anim)?)
        } else {
            None
        };
        let dangly_header_pos = if let MdlNodeData::Dangly(dangly) = &node.node_data {
            Some(self.write_dangly_header(dangly)?)
        } else {
            None
        };
        let aabb_header_pos = if let MdlNodeData::Aabb(_) = &node.node_data {
            Some(self.write_aabb_header()?)
        } else {
            None
        };
        let saber_header_pos = if let MdlNodeData::Saber(saber) = &node.node_data {
            Some(self.write_saber_header(saber)?)
        } else {
            None
        };

        // Phase 2: Write array data (faces, constraints, bones, verts, etc.) after all headers.
        //
        // Light flare arrays are written first (non-mesh type with CExoArrayList data).
        if let (MdlNodeData::Light(light), Some(lhp)) = (&node.node_data, light_header_pos) {
            self.write_light_arrays(light, lhp)?;
        }
        // Skin nodes are ordered specially: skin-bone arrays are emitted before
        // TriMesh internal arrays in vanilla binaries. This ordering affects
        // TriMesh pointer fields (+0xB0/+0xBC/+0xC8) and must match for
        // roundtrip parity.
        if let (MdlNodeData::Skin(skin), Some(shp)) = (&node.node_data, skin_header_pos) {
            self.write_skin_arrays(skin, shp)?;
        }
        if let (MdlNodeData::Saber(saber), Some(shp)) = (&node.node_data, saber_header_pos) {
            self.write_saber_arrays(saber, shp)?;
        }
        if let (MdlNodeData::Dangly(dangly), Some(dhp)) = (&node.node_data, dangly_header_pos) {
            self.write_dangly_arrays(dangly, dhp)?;
            self.write_dangly_pre_mesh_payload(dangly, dhp)?;
        }
        if let (MdlNodeData::Aabb(aabb), Some(ahp)) = (&node.node_data, aabb_header_pos) {
            self.write_aabb_arrays(aabb, ahp)?;
        }
        // Embedded-position pointer (if any) feeds Phase 2b below.
        let embedded_pos_ptr =
            if let (Some(mesh), Some(mhp)) = (node.node_data.mesh(), mesh_header_pos) {
                self.write_mesh_arrays(mesh, mhp)?
            } else {
                None
            };
        if let (MdlNodeData::AnimMesh(anim), Some(ahp)) = (&node.node_data, anim_header_pos) {
            self.write_anim_mesh_arrays(anim, ahp)?;
        }
        // Phase 2b: Write position-only vertex data into MDL content blob.
        //
        // Embedded vertex positions are ALWAYS written (even when MDX is also
        // being generated). The vert_array_offset field (+0x148) is a
        // content-relative pointer to this data, and the engine relocates it
        // during Reset. The separate mdx_data_offset (+0x144) points into the
        // MDX file for the full interleaved vertex buffer.
        if let (Some(mesh), Some(mhp)) = (node.node_data.mesh(), mesh_header_pos) {
            self.write_mesh_content_positions(mesh, mhp, embedded_pos_ptr)?;
        }

        // Phase 3: Defer MDX vertex data (if enabled).
        // MDX data is written after all MDL nodes are complete, in canonical
        // order: non-skin meshes first, then skin meshes (both in DFS order).
        // See mdl_mdx.md §MDX Per-Mesh Terminator Rows (Finding 4).
        //
        // Saber nodes are excluded: they have stride=0 and no MDX vertex data.
        // Saber vertex positions live in the MDL content blob (vert_array_offset),
        // not in the MDX file.
        if self.write_mdx && !node.is_saber() {
            if let Some(mhp) = mesh_header_pos {
                if node.node_data.mesh().is_some() {
                    // dfs_index preserves DFS encounter order for the later
                    // non-skin-then-skin MDX emission pass.
                    let dfs_index = self.deferred_mdx.len();
                    self.deferred_mdx.push(DeferredMdxEntry {
                        mesh_header_pos: mhp,
                        skin_header_pos,
                        is_skin: node.is_skin(),
                        dfs_index,
                    });
                }
            }
        }

        // Write Children (pointer table + recursive child payloads) before
        // controller arrays to match vanilla binary node packing.
        let child_count = count_u32(node.children.len(), "child_count")?;
        let mut child_array_ptr = 0;

        if child_count > 0 {
            // Reserve Child Pointer Array
            let array_start = self.buffer.position();
            child_array_ptr = self.content_position()?;
            let child_arr_bytes =
                usize::try_from(child_count).expect("child_count fits in usize") * 4;
            self.buffer.write_all(&vec![0u8; child_arr_bytes])?;

            // Recurse for each child
            for (i, child) in node.children.iter().enumerate() {
                let child_offset = self.write_node(child, Some(node_start_offset))?;

                // Backpatch pointer
                let save = self.buffer.position();
                // i is bounded by child_count (verified to fit u32 above).
                self.seek_to(array_start, i * 4);
                write_u32(&mut self.buffer, child_offset)?;
                self.buffer.set_position(save);
            }
        }

        // Write Controllers after child payloads.
        let (key_ptr, key_count, data_ptr, data_count) =
            self.write_controllers(&node.controllers, &node.orphan_controller_data)?;

        // Backpatch Node Header
        let save_end = self.buffer.position();
        self.buffer.set_position(header_pos);

        write_u16(
            &mut self.buffer,
            u16::try_from(node.node_data.flags())
                .map_err(|_| MdlError::ValueOverflow("node_flags"))?,
        )?; // 0x00
        self.buffer.write_all(&node.header_padding_02)?; // 0x02..0x03

        // Node id comes from the global name table built elsewhere; unknown
        // names fall back to 0.
        let node_id = *self.name_map.get(&node.name).unwrap_or(&0);
        write_u16(&mut self.buffer, node_id)?; // 0x04

        // +0x06: struct alignment padding.
        self.buffer.write_all(&node.header_padding_06)?; // 0x06..0x07

        // +0x08: binary MDL files preserve this slot as 0 in vanilla assets.
        // Node naming is resolved via node_id + global name table.
        write_u32(&mut self.buffer, 0)?; // 0x08..0x0B

        // +0x0C: parent node offset (content-relative, relocated if non-zero).
        // Root nodes have no parent (0). See mdl_mdx.md §MdlNode Binary Layout.
        write_u32(&mut self.buffer, parent_node_offset.unwrap_or(0))?; // 0x0C..0x0F

        self.seek_to(header_pos, node_offsets::POS_X);
        write_f32(&mut self.buffer, node.position[0])?;
        write_f32(&mut self.buffer, node.position[1])?;
        write_f32(&mut self.buffer, node.position[2])?;

        // Orientation quaternion (w, x, y, z) - direct, no swizzle
        self.seek_to(header_pos, node_offsets::ORIENTATION_W);
        write_f32(&mut self.buffer, node.rotation[0])?; // w
        write_f32(&mut self.buffer, node.rotation[1])?; // x
        write_f32(&mut self.buffer, node.rotation[2])?; // y
        write_f32(&mut self.buffer, node.rotation[3])?; // z

        // 3-field arrays: ptr, count_used, count_allocated (alloc = used on disk)
        self.seek_to(header_pos, node_offsets::CHILD_ARRAY_PTR);
        let child_ptr_field = if child_count > 0 {
            child_array_ptr
        } else if key_count > 0 {
            // Vanilla binaries often mirror controller-key pointer here even
            // when child_count is zero.
            key_ptr
        } else {
            0
        };
        write_u32(&mut self.buffer, child_ptr_field)?;
        write_u32(&mut self.buffer, child_count)?; // used
        write_u32(&mut self.buffer, child_count)?; // allocated

        self.seek_to(header_pos, node_offsets::CONTROLLER_KEY_PTR);
        write_u32(&mut self.buffer, key_ptr)?;
        write_u32(&mut self.buffer, key_count)?; // used
        write_u32(&mut self.buffer, key_count)?; // allocated

        self.seek_to(header_pos, node_offsets::CONTROLLER_DATA_PTR);
        write_u32(&mut self.buffer, data_ptr)?;
        write_u32(&mut self.buffer, data_count)?; // used
        write_u32(&mut self.buffer, data_count)?; // allocated

        self.buffer.set_position(save_end);
        Ok(node_start_offset)
    }
927
928    /// Writes a f32 at `header_pos + field_offset` without moving the cursor.
929    fn write_f32_at(
930        &mut self,
931        header_pos: u64,
932        field_offset: usize,
933        val: f32,
934    ) -> Result<(), MdlError> {
935        let save = self.buffer.position();
936        self.seek_to(header_pos, field_offset);
937        write_f32(&mut self.buffer, val)?;
938        self.buffer.set_position(save);
939        Ok(())
940    }
941
942    /// Writes a u32 at `header_pos + field_offset` without moving the cursor.
943    fn write_u32_at(
944        &mut self,
945        header_pos: u64,
946        field_offset: usize,
947        val: u32,
948    ) -> Result<(), MdlError> {
949        let save = self.buffer.position();
950        self.seek_to(header_pos, field_offset);
951        write_u32(&mut self.buffer, val)?;
952        self.buffer.set_position(save);
953        Ok(())
954    }
955
956    /// Writes an i32 at `header_pos + field_offset` without moving the cursor.
957    fn write_i32_at(
958        &mut self,
959        header_pos: u64,
960        field_offset: usize,
961        val: i32,
962    ) -> Result<(), MdlError> {
963        let save = self.buffer.position();
964        self.seek_to(header_pos, field_offset);
965        write_i32(&mut self.buffer, val)?;
966        self.buffer.set_position(save);
967        Ok(())
968    }
969
970    /// Writes a u16 at `header_pos + field_offset` without moving the cursor.
971    fn write_u16_at(
972        &mut self,
973        header_pos: u64,
974        field_offset: usize,
975        val: u16,
976    ) -> Result<(), MdlError> {
977        let save = self.buffer.position();
978        self.seek_to(header_pos, field_offset);
979        write_u16(&mut self.buffer, val)?;
980        self.buffer.set_position(save);
981        Ok(())
982    }
983
984    /// Writes a u8 at `header_pos + field_offset` without moving the cursor.
985    fn write_u8_at(
986        &mut self,
987        header_pos: u64,
988        field_offset: usize,
989        val: u8,
990    ) -> Result<(), MdlError> {
991        let save = self.buffer.position();
992        self.seek_to(header_pos, field_offset);
993        self.buffer.write_all(&[val])?;
994        self.buffer.set_position(save);
995        Ok(())
996    }
997
998    /// Writes a 3×f32 vector at `header_pos + field_offset` without moving the cursor.
999    fn write_vec3_at(
1000        &mut self,
1001        header_pos: u64,
1002        field_offset: usize,
1003        v: &[f32; 3],
1004    ) -> Result<(), MdlError> {
1005        let save = self.buffer.position();
1006        self.seek_to(header_pos, field_offset);
1007        write_f32(&mut self.buffer, v[0])?;
1008        write_f32(&mut self.buffer, v[1])?;
1009        write_f32(&mut self.buffer, v[2])?;
1010        self.buffer.set_position(save);
1011        Ok(())
1012    }
1013
1014    /// Writes a null-terminated string at `header_pos + field_offset`,
1015    /// padded with zeros to `max_len` bytes.
1016    fn write_string_at(
1017        &mut self,
1018        header_pos: u64,
1019        field_offset: usize,
1020        s: &str,
1021        max_len: usize,
1022    ) -> Result<(), MdlError> {
1023        let save = self.buffer.position();
1024        self.seek_to(header_pos, field_offset);
1025        crate::binary::write_fixed_c_string(&mut self.buffer, s, max_len)?;
1026        self.buffer.set_position(save);
1027        Ok(())
1028    }
1029
    /// Writes the 332-byte mesh extra header from typed fields.
    ///
    /// The header is first emitted as a zero-filled block at the current
    /// cursor; individual fields are then overlaid in place via the
    /// `*_at` helpers (which save/restore the forward cursor). The
    /// CExoArrayList pointer/count slots are deliberately left zeroed here
    /// and backpatched later once the array payloads have been written.
    ///
    /// Returns the header position for deferred backpatching by
    /// [`write_mesh_arrays`].
    fn write_mesh_header(&mut self, mesh: &super::MdlMesh) -> Result<u64, MdlError> {
        let header_pos = self.buffer.position();
        // Zero-filled 332-byte mesh extra header; typed fields are overlaid below.
        self.buffer.write_all(&[0u8; MESH_EXTRA_SIZE])?;

        // Toolset function pointer stubs (extra +0x00, +0x04).
        self.write_u32_at(
            header_pos,
            mesh_offsets::FN_PTR_GEN_VERTICES,
            mesh.fn_ptr_gen_vertices,
        )?;
        self.write_u32_at(
            header_pos,
            mesh_offsets::FN_PTR_REMOVE_TEMP_ARRAY,
            mesh.fn_ptr_remove_temp_array,
        )?;

        // Bounding box and sphere.
        self.write_vec3_at(header_pos, mesh_offsets::BOUNDING_MIN, &mesh.bounding_min)?;
        self.write_vec3_at(header_pos, mesh_offsets::BOUNDING_MAX, &mesh.bounding_max)?;
        self.write_f32_at(
            header_pos,
            mesh_offsets::BSPHERE_RADIUS,
            mesh.bsphere_radius,
        )?;
        self.write_vec3_at(
            header_pos,
            mesh_offsets::BSPHERE_CENTER,
            &mesh.bsphere_center,
        )?;

        // Colors.
        self.write_vec3_at(header_pos, mesh_offsets::DIFFUSE_COLOR, &mesh.diffuse_color)?;
        self.write_vec3_at(header_pos, mesh_offsets::AMBIENT_COLOR, &mesh.ambient_color)?;

        // Transparency.
        self.write_i32_at(
            header_pos,
            mesh_offsets::TRANSPARENCY_HINT,
            mesh.transparency_hint,
        )?;

        // Texture names (fixed-size, zero-padded C strings).
        self.write_string_at(
            header_pos,
            mesh_offsets::TEXTURE_0,
            &mesh.texture_0,
            mesh_offsets::TEXTURE_NAME_SIZE,
        )?;
        self.write_string_at(
            header_pos,
            mesh_offsets::TEXTURE_1,
            &mesh.texture_1,
            mesh_offsets::TEXTURE_NAME_SIZE,
        )?;

        // Shared index scalars: four consecutive 32-bit fields, written in
        // one seek so a single save/restore covers all of them.
        {
            let save = self.buffer.position();
            self.seek_to(header_pos, mesh_offsets::SHARED_INDEX_OFFSET);
            write_i32(&mut self.buffer, mesh.shared_index_offset)?;
            write_i32(&mut self.buffer, mesh.shared_index_pool)?;
            write_i32(&mut self.buffer, mesh.shared_index_size)?;
            write_u32(&mut self.buffer, mesh.indices_per_face)?;
            self.buffer.set_position(save);
        }

        // UV animation.
        self.write_i32_at(header_pos, mesh_offsets::ANIMATE_UV, mesh.animate_uv)?;
        self.write_f32_at(
            header_pos,
            mesh_offsets::UV_DIRECTION_X,
            mesh.uv_direction_x,
        )?;
        self.write_f32_at(
            header_pos,
            mesh_offsets::UV_DIRECTION_Y,
            mesh.uv_direction_y,
        )?;
        self.write_f32_at(header_pos, mesh_offsets::UV_JITTER, mesh.uv_jitter)?;
        self.write_f32_at(
            header_pos,
            mesh_offsets::UV_JITTER_SPEED,
            mesh.uv_jitter_speed,
        )?;

        // Vertex count and channel count.
        self.write_u16_at(header_pos, mesh_offsets::VERTEX_COUNT, mesh.vertex_count)?;
        self.write_u16_at(
            header_pos,
            mesh_offsets::TEXTURE_CHANNEL_COUNT,
            mesh.texture_channel_count,
        )?;

        // Boolean flags (stored as single bytes).
        self.write_u8_at(
            header_pos,
            mesh_offsets::LIGHT_MAPPED,
            u8::from(mesh.light_mapped),
        )?;
        self.write_u8_at(
            header_pos,
            mesh_offsets::ROTATE_TEXTURE,
            u8::from(mesh.rotate_texture),
        )?;
        self.write_u8_at(
            header_pos,
            mesh_offsets::IS_BACKGROUND_GEOMETRY,
            u8::from(mesh.is_background_geometry),
        )?;
        self.write_u8_at(header_pos, mesh_offsets::SHADOW, u8::from(mesh.shadow))?;
        self.write_u8_at(header_pos, mesh_offsets::BEAMING, u8::from(mesh.beaming))?;
        self.write_u8_at(header_pos, mesh_offsets::RENDER, u8::from(mesh.render))?;

        // Total surface area.
        self.write_f32_at(
            header_pos,
            mesh_offsets::TOTAL_SURFACE_AREA,
            mesh.total_surface_area,
        )?;

        // MDX layout sentinel defaults for offset slots that are always -1
        // in vanilla. The active slots (+0xFC stride, +0x100 flags,
        // +0x104..+0x120 attribute offsets) are filled by write_mesh_mdx_data.
        // The 3 reserved slots at +0x124..+0x12C are never used by K1 but
        // the constructor initializes them to -1.
        {
            let save = self.buffer.position();
            // 8 active attribute offset slots (+0x104..+0x120) default to -1.
            // write_mesh_mdx_data overwrites the ones it uses.
            self.seek_to(header_pos, mesh_offsets::MDX_POSITION_OFFSET);
            for _ in 0..8 {
                write_u32(&mut self.buffer, 0xFFFF_FFFF)?;
            }
            // 3 reserved slots at +0x124..+0x12C: always -1.
            for _ in 0..3 {
                write_u32(&mut self.buffer, 0xFFFF_FFFF)?;
            }
            self.buffer.set_position(save);
        }

        Ok(header_pos)
    }
1177
    /// Writes face array data and the TriMesh internal CExoArrayList
    /// payloads, backpatching the mesh header's pointer/count/alloc slots.
    ///
    /// Must be called after all contiguous headers (mesh + subtype) are written,
    /// so the array data sits after the header block - matching the binary format
    /// layout where headers are contiguous and data follows.
    ///
    /// Returns the content-relative pointer to the embedded vertex positions
    /// (just past the leading count u32) when positions were embedded, or
    /// `None` otherwise; the caller forwards this to
    /// [`write_mesh_content_positions`].
    fn write_mesh_arrays(
        &mut self,
        mesh: &super::MdlMesh,
        mesh_header_pos: u64,
    ) -> Result<Option<u32>, MdlError> {
        let face_count = count_u32(mesh.faces.len(), "face_count")?;
        let mut face_offset = 0u32;

        // Face records: plane (normal + distance), surface id, adjacency,
        // and vertex indices - 32 bytes each.
        if face_count > 0 {
            face_offset = self.content_position()?;
            for face in &mesh.faces {
                write_f32(&mut self.buffer, face.plane_normal[0])?;
                write_f32(&mut self.buffer, face.plane_normal[1])?;
                write_f32(&mut self.buffer, face.plane_normal[2])?;
                write_f32(&mut self.buffer, face.plane_distance)?;
                write_u32(&mut self.buffer, face.surface_id)?;
                write_u16(&mut self.buffer, face.adjacent[0])?;
                write_u16(&mut self.buffer, face.adjacent[1])?;
                write_u16(&mut self.buffer, face.adjacent[2])?;
                write_u16(&mut self.buffer, face.vertex_indices[0])?;
                write_u16(&mut self.buffer, face.vertex_indices[1])?;
                write_u16(&mut self.buffer, face.vertex_indices[2])?;
            }
        }

        // Backpatch face CExoArrayList: ptr, count, alloc
        let save_end = self.buffer.position();
        self.seek_to(mesh_header_pos, mesh_offsets::FACE_ARRAY_OFFSET);
        write_u32(&mut self.buffer, face_offset)?;
        write_u32(&mut self.buffer, face_count)?;
        write_u32(&mut self.buffer, face_count)?; // alloc = count on disk
        self.buffer.set_position(save_end);

        // --- Write TriMesh internal CExoArrayList data payloads ---
        //
        // The 5 CExoArrayLists at +0x98..+0xC8:
        //   +0x98 vertex_indices - dead in KotOR, always zeros
        //   +0xA4 left_over_faces - always empty in vanilla (ptr=0, count=0)
        //   +0xB0 vertex_indices_count - single u32 (face_count*3) or embedded positions
        //   +0xBC mdx_offsets - single u32 content pointer -> packed u16 face indices
        //   +0xC8 index_buffer_pools - single u32 inverted counter
        //
        // Corpus analysis (76,767 meshes): vertex_indices and left_over_faces
        // are ALWAYS ptr=0 in vanilla. Only the last 3 have data payloads.
        //
        // Write ordering for the 3 active arrays: sort by source pointers when
        // available (vanilla-backed), otherwise use default order.
        // Packed u16 face indices are written after all CExoArrayList payloads,
        // then the mdx_offsets placeholder is backpatched to point to them.
        //
        // See `docs/notes/mesh_derived_fields.md` for full documentation.

        // +0x98: vertex_indices - dead field, always zeros (already zeroed from init).

        // +0xA4: left_over_faces - always empty in vanilla binary files.
        self.backpatch_cexolist(
            mesh_header_pos,
            mesh_offsets::LEFT_OVER_FACES_ARRAY_PTR,
            0,
            0,
            0,
        )?;

        // Write the 3 active CExoArrayList data payloads in canonical order.
        let vertex_count = usize::from(mesh.vertex_count);
        let can_embed_positions = mesh.has_embedded_positions
            && !mesh.positions.is_empty()
            && mesh.positions.len() >= vertex_count;

        let mut vertex_indices_count_data_ptr = 0u32;

        // +0xB0: vertex_indices_count - single u32 (face_count*3) or embedded positions.
        if can_embed_positions {
            // Payload: leading count u32 followed by 3xf32 per vertex.
            let ptr = self.content_position()?;
            vertex_indices_count_data_ptr = ptr;
            write_u32(&mut self.buffer, face_count * 3)?;
            for pos in mesh.positions.iter().take(vertex_count) {
                write_f32(&mut self.buffer, pos[0])?;
                write_f32(&mut self.buffer, pos[1])?;
                write_f32(&mut self.buffer, pos[2])?;
            }
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::VERTEX_INDICES_COUNT_ARRAY_PTR,
                ptr,
                1,
                1,
            )?;
        } else if face_count > 0 {
            // Payload: just the face_count*3 index count.
            let ptr = self.content_position()?;
            vertex_indices_count_data_ptr = ptr;
            write_u32(&mut self.buffer, face_count * 3)?;
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::VERTEX_INDICES_COUNT_ARRAY_PTR,
                ptr,
                1,
                1,
            )?;
        } else {
            // No faces and no embedded positions: empty list.
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::VERTEX_INDICES_COUNT_ARRAY_PTR,
                0,
                0,
                0,
            )?;
        }

        // +0xBC: mdx_offsets - single u32 content pointer to packed u16 face indices.
        if face_count > 0 {
            let ptr = self.content_position()?;
            write_u32(&mut self.buffer, 0)?; // placeholder, backpatched below
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::MDX_OFFSETS_ARRAY_PTR,
                ptr,
                1,
                1,
            )?;
        } else {
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::MDX_OFFSETS_ARRAY_PTR,
                0,
                0,
                0,
            )?;
        }

        // +0xC8: index_buffer_pools - inverted counter value.
        if mesh.inverted_counter > 0 || face_count > 0 {
            let ptr = self.content_position()?;
            write_u32(&mut self.buffer, mesh.inverted_counter)?;
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::INDEX_BUFFER_POOLS_ARRAY_PTR,
                ptr,
                1,
                1,
            )?;
        } else {
            self.backpatch_cexolist(
                mesh_header_pos,
                mesh_offsets::INDEX_BUFFER_POOLS_ARRAY_PTR,
                0,
                0,
                0,
            )?;
        }

        // Write packed u16 face indices after all CExoArrayList payloads,
        // then backpatch the mdx_offsets placeholder to point here.
        if face_count > 0 {
            let packed_ptr = self.content_position()?;
            for face in &mesh.faces {
                write_u16(&mut self.buffer, face.vertex_indices[0])?;
                write_u16(&mut self.buffer, face.vertex_indices[1])?;
                write_u16(&mut self.buffer, face.vertex_indices[2])?;
            }
            self.backpatch_mdx_offsets_data(mesh_header_pos, packed_ptr)?;
        }

        // NOTE: shared_index_offset/pool/size/indices_per_face already written
        // in write_mesh_header() - no need to repeat here.

        // Skip the 4-byte leading count to land directly on the f32 triples.
        let embedded_pos_ptr = if can_embed_positions && vertex_indices_count_data_ptr > 0 {
            Some(vertex_indices_count_data_ptr.saturating_add(4))
        } else {
            None
        };

        Ok(embedded_pos_ptr)
    }
1358
1359    /// Backpatch a CExoArrayList header (ptr, count, alloc) at a mesh header offset.
1360    fn backpatch_cexolist(
1361        &mut self,
1362        mesh_header_pos: u64,
1363        ptr_field: usize,
1364        ptr: u32,
1365        count: u32,
1366        alloc: u32,
1367    ) -> Result<(), MdlError> {
1368        let save = self.buffer.position();
1369        self.seek_to(mesh_header_pos, ptr_field);
1370        write_u32(&mut self.buffer, ptr)?;
1371        write_u32(&mut self.buffer, count)?;
1372        write_u32(&mut self.buffer, alloc)?;
1373        self.buffer.set_position(save);
1374        Ok(())
1375    }
1376
1377    /// Backpatch the mdx_offsets (+0xBC) data value to point to packed face indices.
1378    ///
1379    /// The mdx_offsets CExoArrayList data is a single u32 containing a
1380    /// content-relative pointer to the packed u16 face vertex indices.
1381    /// The data payload was written as a placeholder 0 during the ordering loop.
1382    fn backpatch_mdx_offsets_data(
1383        &mut self,
1384        mesh_header_pos: u64,
1385        packed_ptr: u32,
1386    ) -> Result<(), MdlError> {
1387        // Read the mdx_offsets data pointer from the header.
1388        let save = self.buffer.position();
1389        self.seek_to(mesh_header_pos, mesh_offsets::MDX_OFFSETS_ARRAY_PTR);
1390        let mut ptr_bytes = [0u8; 4];
1391        std::io::Read::read_exact(&mut self.buffer, &mut ptr_bytes)?;
1392        let data_ptr = u32::from_le_bytes(ptr_bytes);
1393        if data_ptr > 0 {
1394            // Backpatch the placeholder u32 at the data location.
1395            self.buffer
1396                .set_position(u64::from(data_ptr) + MDL_WRAPPER_SIZE);
1397            write_u32(&mut self.buffer, packed_ptr)?;
1398        }
1399        self.buffer.set_position(save);
1400        Ok(())
1401    }
1402
1403    /// Writes position-only vertex data into the MDL content blob and backpatches
1404    /// the mesh header's `mdx_data_offset` field (+0x148) to point to it.
1405    ///
1406    /// The binary format stores position data (12 bytes/vertex = 3×f32) within
1407    /// the MDL content blob, referenced by `mdx_data_offset` which is an MDL
1408    /// content-relative offset (NOT an MDX file offset). The engine relocates
1409    /// this pointer against the MDL content base during Reset.
1410    ///
1411    /// See `docs/notes/mdl_mdx.md` -- MDX Data Offset Semantics.
1412    fn write_mesh_content_positions(
1413        &mut self,
1414        mesh: &super::MdlMesh,
1415        mesh_header_pos: u64,
1416        embedded_pos_ptr: Option<u32>,
1417    ) -> Result<(), MdlError> {
1418        if let Some(pos_ptr) = embedded_pos_ptr {
1419            let save = self.buffer.position();
1420            self.seek_to(mesh_header_pos, mesh_offsets::VERT_ARRAY_OFFSET);
1421            write_u32(&mut self.buffer, pos_ptr)?;
1422            self.buffer.set_position(save);
1423            return Ok(());
1424        }
1425
1426        if mesh.positions.is_empty() {
1427            return Ok(());
1428        }
1429
1430        // Write position data into MDL content buffer.
1431        let pos_start = self.content_position()?; // content-relative
1432
1433        for pos in &mesh.positions {
1434            write_f32(&mut self.buffer, pos[0])?;
1435            write_f32(&mut self.buffer, pos[1])?;
1436            write_f32(&mut self.buffer, pos[2])?;
1437        }
1438
1439        // Backpatch vert_array_offset at mesh header +0x148.
1440        let save = self.buffer.position();
1441        self.seek_to(mesh_header_pos, mesh_offsets::VERT_ARRAY_OFFSET);
1442        write_u32(&mut self.buffer, pos_start)?;
1443        self.buffer.set_position(save);
1444
1445        Ok(())
1446    }
1447
1448    /// Writes vertex attribute data into the MDX buffer and backpatches the mesh
1449    /// header's MDX-related fields (stride, flags, attribute offsets).
1450    ///
1451    /// Returns the computed stride (bytes per vertex). Returns 0 if no vertex
1452    /// data was written.
1453    ///
1454    /// The canonical layout order follows the engine's `InternalPostProcess`:
1455    /// position -> normal -> vertex_colors -> UV1 -> UV2 -> UV3 -> UV4 -> tangent_space.
1456    fn write_mesh_mdx_data(
1457        &mut self,
1458        mesh: &super::MdlMesh,
1459        skin: Option<&super::MdlSkin>,
1460        mesh_header_pos: u64,
1461        skin_header_pos: Option<u64>,
1462    ) -> Result<u32, MdlError> {
1463        #[derive(Clone, Copy)]
1464        struct MdxLayout {
1465            stride: u32,
1466            flags: u32,
1467            pos_off: i32,
1468            norm_off: i32,
1469            color_off: i32,
1470            uv1_off: i32,
1471            uv2_off: i32,
1472            uv3_off: i32,
1473            uv4_off: i32,
1474            tangent_off: i32,
1475            bone_weights_off: i32,
1476            bone_indices_off: i32,
1477        }
1478
1479        let vertex_count = usize::from(mesh.vertex_count);
1480        if vertex_count == 0 {
1481            return Ok(0);
1482        }
1483
1484        // Determine the effective vertex count for MDX output.
1485        //
1486        // The mesh header declares `vertex_count` which may be larger than
1487        // the number of vertices we actually have data for. This happens in
1488        // vanilla K1 item models (e.g., i_adrnaline_001) where the MDX offset
1489        // points deep into a shared MDX buffer and the reader truncates when
1490        // data runs out. The writer must only emit the vertices we have.
1491        //
1492        // All populated attribute arrays must have the same length - if they
1493        // disagree, that's an actual data error.
1494        let mut effective_vertex_count = vertex_count;
1495        macro_rules! clamp_attr {
1496            ($arr:expr, $name:expr) => {
1497                if !$arr.is_empty() {
1498                    if effective_vertex_count == vertex_count {
1499                        // First populated attribute sets the bound
1500                        effective_vertex_count = $arr.len();
1501                    } else if $arr.len() != effective_vertex_count {
1502                        return Err(MdlError::InvalidData(format!(
1503                            "mesh {}: {} has {} elements but {} has {}",
1504                            $name,
1505                            stringify!($arr),
1506                            $arr.len(),
1507                            "other attribute",
1508                            effective_vertex_count
1509                        )));
1510                    }
1511                }
1512            };
1513        }
1514        clamp_attr!(mesh.positions, "positions");
1515        clamp_attr!(mesh.normals, "normals");
1516        clamp_attr!(mesh.vertex_colors, "vertex_colors");
1517        clamp_attr!(mesh.uv1, "uv1");
1518        clamp_attr!(mesh.uv2, "uv2");
1519        clamp_attr!(mesh.uv3, "uv3");
1520        clamp_attr!(mesh.uv4, "uv4");
1521        clamp_attr!(mesh.tangent_space, "tangent_space");
1522        if let Some(s) = skin {
1523            clamp_attr!(s.bone_weights, "bone_weights");
1524            clamp_attr!(s.bone_indices, "bone_indices");
1525        }
1526
1527        // Determine which attributes are present.
1528        let has_pos = !mesh.positions.is_empty();
1529        let has_norm = !mesh.normals.is_empty();
1530        let has_color = !mesh.vertex_colors.is_empty();
1531        let has_uv1 = !mesh.uv1.is_empty();
1532        let has_uv2 = !mesh.uv2.is_empty();
1533        let has_uv3 = !mesh.uv3.is_empty();
1534        let has_uv4 = !mesh.uv4.is_empty();
1535        let has_tangent = !mesh.tangent_space.is_empty();
1536        let has_bone_weights = skin.is_some_and(|s| !s.bone_weights.is_empty());
1537        let has_bone_indices = skin.is_some_and(|s| !s.bone_indices.is_empty());
1538
1539        // Compute canonical layout: position -> normal -> color -> UV1..4 -> tangent -> bone weights -> bone indices.
1540        // Each attribute gets a byte offset within the per-vertex stride.
1541        let mut current_byte = 0u32;
1542
1543        let pos_off: i32 = if has_pos {
1544            let o = i32::try_from(current_byte)
1545                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1546            current_byte += 12;
1547            o
1548        } else {
1549            -1
1550        };
1551        let norm_off: i32 = if has_norm {
1552            let o = i32::try_from(current_byte)
1553                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1554            current_byte += 12;
1555            o
1556        } else {
1557            -1
1558        };
1559        let color_off: i32 = if has_color {
1560            let o = i32::try_from(current_byte)
1561                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1562            current_byte += 4;
1563            o
1564        } else {
1565            -1
1566        };
1567        let uv1_off: i32 = if has_uv1 {
1568            let o = i32::try_from(current_byte)
1569                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1570            current_byte += 8;
1571            o
1572        } else {
1573            -1
1574        };
1575        let uv2_off: i32 = if has_uv2 {
1576            let o = i32::try_from(current_byte)
1577                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1578            current_byte += 8;
1579            o
1580        } else {
1581            -1
1582        };
1583        let uv3_off: i32 = if has_uv3 {
1584            let o = i32::try_from(current_byte)
1585                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1586            current_byte += 8;
1587            o
1588        } else {
1589            -1
1590        };
1591        let uv4_off: i32 = if has_uv4 {
1592            let o = i32::try_from(current_byte)
1593                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1594            current_byte += 8;
1595            o
1596        } else {
1597            -1
1598        };
1599        let tangent_off: i32 = if has_tangent {
1600            let o = i32::try_from(current_byte)
1601                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1602            current_byte += 36;
1603            o
1604        } else {
1605            -1
1606        };
1607        let bone_weights_off: i32 = if has_bone_weights {
1608            let o = i32::try_from(current_byte)
1609                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1610            current_byte += 16;
1611            o
1612        } else {
1613            -1
1614        };
1615        let bone_indices_off: i32 = if has_bone_indices {
1616            let o = i32::try_from(current_byte)
1617                .map_err(|_| MdlError::ValueOverflow("mdx_attribute_offset"))?;
1618            current_byte += 16;
1619            o
1620        } else {
1621            -1
1622        };
1623
1624        let canonical_stride = current_byte;
1625
1626        // Compute flags (vertex colors have no flag bit - presence is
1627        // determined by offset != -1, per InternalPostProcess evidence).
1628        let mut canonical_flags = 0u32;
1629        if has_pos {
1630            canonical_flags |= 0x01;
1631        }
1632        if has_norm {
1633            canonical_flags |= 0x20;
1634        }
1635        if has_uv1 {
1636            canonical_flags |= 0x02;
1637        }
1638        if has_uv2 {
1639            canonical_flags |= 0x04;
1640        }
1641        if has_uv3 {
1642            canonical_flags |= 0x08;
1643        }
1644        if has_uv4 {
1645            canonical_flags |= 0x10;
1646        }
1647        if has_tangent {
1648            canonical_flags |= 0x80;
1649        }
1650
1651        let layout = MdxLayout {
1652            stride: canonical_stride,
1653            flags: canonical_flags,
1654            pos_off,
1655            norm_off,
1656            color_off,
1657            uv1_off,
1658            uv2_off,
1659            uv3_off,
1660            uv4_off,
1661            tangent_off,
1662            bone_weights_off,
1663            bone_indices_off,
1664        };
1665
1666        if layout.stride == 0 {
1667            return Ok(0); // No attributes to write
1668        }
1669
1670        // Canonical interleaved write: position -> normal -> color -> UV1..4 -> tangent -> bone weights -> bone indices.
1671        for i in 0..effective_vertex_count {
1672            if has_pos {
1673                let p = &mesh.positions[i];
1674                write_f32(&mut self.mdx_buffer, p[0])?;
1675                write_f32(&mut self.mdx_buffer, p[1])?;
1676                write_f32(&mut self.mdx_buffer, p[2])?;
1677            }
1678            if has_norm {
1679                let n = &mesh.normals[i];
1680                write_f32(&mut self.mdx_buffer, n[0])?;
1681                write_f32(&mut self.mdx_buffer, n[1])?;
1682                write_f32(&mut self.mdx_buffer, n[2])?;
1683            }
1684            if has_color {
1685                let c = &mesh.vertex_colors[i];
1686                self.mdx_buffer.write_all(c)?;
1687            }
1688            if has_uv1 {
1689                let u = &mesh.uv1[i];
1690                write_f32(&mut self.mdx_buffer, u[0])?;
1691                write_f32(&mut self.mdx_buffer, u[1])?;
1692            }
1693            if has_uv2 {
1694                let u = &mesh.uv2[i];
1695                write_f32(&mut self.mdx_buffer, u[0])?;
1696                write_f32(&mut self.mdx_buffer, u[1])?;
1697            }
1698            if has_uv3 {
1699                let u = &mesh.uv3[i];
1700                write_f32(&mut self.mdx_buffer, u[0])?;
1701                write_f32(&mut self.mdx_buffer, u[1])?;
1702            }
1703            if has_uv4 {
1704                let u = &mesh.uv4[i];
1705                write_f32(&mut self.mdx_buffer, u[0])?;
1706                write_f32(&mut self.mdx_buffer, u[1])?;
1707            }
1708            if has_tangent {
1709                let t = &mesh.tangent_space[i];
1710                for row in t {
1711                    write_f32(&mut self.mdx_buffer, row[0])?;
1712                    write_f32(&mut self.mdx_buffer, row[1])?;
1713                    write_f32(&mut self.mdx_buffer, row[2])?;
1714                }
1715            }
1716            if has_bone_weights {
1717                // skin is guaranteed Some when has_bone_weights is true.
1718                let bw = &skin
1719                    .expect("has_bone_weights implies skin is Some")
1720                    .bone_weights[i];
1721                write_f32(&mut self.mdx_buffer, bw[0])?;
1722                write_f32(&mut self.mdx_buffer, bw[1])?;
1723                write_f32(&mut self.mdx_buffer, bw[2])?;
1724                write_f32(&mut self.mdx_buffer, bw[3])?;
1725            }
1726            if has_bone_indices {
1727                // skin is guaranteed Some when has_bone_indices is true.
1728                let bi = &skin
1729                    .expect("has_bone_indices implies skin is Some")
1730                    .bone_indices[i];
1731                write_f32(&mut self.mdx_buffer, bi[0])?;
1732                write_f32(&mut self.mdx_buffer, bi[1])?;
1733                write_f32(&mut self.mdx_buffer, bi[2])?;
1734                write_f32(&mut self.mdx_buffer, bi[3])?;
1735            }
1736        }
1737
1738        // Backpatch mesh header with MDX layout fields.
1739        let save = self.buffer.position();
1740
1741        // Stride at +0xFC
1742        self.seek_to(mesh_header_pos, mesh_offsets::VERTEX_STRUCT_SIZE);
1743        write_u32(&mut self.buffer, layout.stride)?;
1744
1745        // Flags at +0x100
1746        self.seek_to(mesh_header_pos, mesh_offsets::MDX_VERTEX_FLAGS);
1747        write_u32(&mut self.buffer, layout.flags)?;
1748
1749        // Per-attribute byte offsets at +0x104..+0x120 (contiguous)
1750        self.seek_to(mesh_header_pos, mesh_offsets::MDX_POSITION_OFFSET);
1751        write_i32(&mut self.buffer, layout.pos_off)?;
1752        write_i32(&mut self.buffer, layout.norm_off)?;
1753        write_i32(&mut self.buffer, layout.color_off)?;
1754        write_i32(&mut self.buffer, layout.uv1_off)?;
1755        write_i32(&mut self.buffer, layout.uv2_off)?;
1756        write_i32(&mut self.buffer, layout.uv3_off)?;
1757        write_i32(&mut self.buffer, layout.uv4_off)?;
1758        write_i32(&mut self.buffer, layout.tangent_off)?;
1759
1760        // NOTE: mdx_data_offset (+0x148) is backpatched by write_deferred_mdx()
1761        // with the MDX file offset, not set here. When not writing MDX, it's
1762        // backpatched by write_mesh_content_positions() with MDL content offset.
1763
1764        // Backpatch skin header bone weight/index offsets with the canonical
1765        // layout values computed above (the initial values from write_skin_header
1766        // may differ when the source model used a different attribute ordering).
1767        if let Some(shp) = skin_header_pos {
1768            self.write_i32_at(
1769                shp,
1770                skin_offsets::MDX_BONE_WEIGHTS_OFFSET,
1771                layout.bone_weights_off,
1772            )?;
1773            self.write_i32_at(
1774                shp,
1775                skin_offsets::MDX_BONE_INDICES_OFFSET,
1776                layout.bone_indices_off,
1777            )?;
1778        }
1779
1780        self.buffer.set_position(save);
1781        Ok(layout.stride)
1782    }
1783
1784    /// Writes a per-mesh terminator row into the MDX buffer.
1785    ///
1786    /// BioWare's build tools emit one stride-sized row of sentinel floats after
1787    /// each mesh's vertex data, followed by zero-padding to the next 16-byte
1788    /// boundary. Non-skin meshes use 10,000,000.0 as the sentinel; skin meshes
1789    /// use 1,000,000.0.
1790    ///
1791    /// See `docs/notes/mdl_mdx.md` §MDX Per-Mesh Terminator Rows.
1792    fn write_mdx_terminator(&mut self, stride: u32, is_skin: bool) -> Result<(), MdlError> {
1793        if stride == 0 {
1794            return Ok(());
1795        }
1796
1797        let stride = usize::try_from(stride).expect("u32 stride fits in usize");
1798        let sentinel: f32 = if is_skin { 1_000_000.0 } else { 10_000_000.0 };
1799
1800        // Write 3× sentinel float (position-like xyz), then zero-fill remaining bytes.
1801        write_f32(&mut self.mdx_buffer, sentinel)?;
1802        write_f32(&mut self.mdx_buffer, sentinel)?;
1803        write_f32(&mut self.mdx_buffer, sentinel)?;
1804
1805        // Fill the rest of the stride with zeros.
1806        let remaining = stride.saturating_sub(12);
1807        if remaining > 0 {
1808            let zeros = vec![0u8; remaining];
1809            self.mdx_buffer.write_all(&zeros)?;
1810        }
1811
1812        // Save position before alignment - the final mesh's padding will be
1813        // trimmed by truncating to this position after all nodes are written.
1814        self.mdx_pre_align_pos = self.mdx_buffer.position();
1815
1816        // Pad to 16-byte alignment (will be trimmed for the last mesh).
1817        let pos =
1818            usize::try_from(self.mdx_buffer.position()).expect("mdx buffer position fits in usize");
1819        let aligned = (pos + 15) & !15;
1820        let pad = aligned - pos;
1821        if pad > 0 {
1822            let zeros = vec![0u8; pad];
1823            self.mdx_buffer.write_all(&zeros)?;
1824        }
1825
1826        Ok(())
1827    }
1828
1829    /// Writes the 100-byte Skin extra header from typed fields.
1830    ///
1831    /// Emits 100 zero bytes then backpatches the inline scalar fields:
1832    /// MDX bone weight/index offsets (+0x0C/+0x10) and bone_node_numbers (+0x40).
1833    /// CExoArrayList pointers are backpatched later by [`write_skin_arrays`].
1834    fn write_skin_header(&mut self, skin: &super::MdlSkin) -> Result<u64, MdlError> {
1835        let header_pos = self.buffer.position();
1836        self.buffer.write_all(&[0u8; SKIN_EXTRA_SIZE])?;
1837
1838        let save = self.buffer.position();
1839
1840        // MDX bone weight/index offsets at +0x0C/+0x10.
1841        self.write_i32_at(
1842            header_pos,
1843            skin_offsets::MDX_BONE_WEIGHTS_OFFSET,
1844            skin.mdx_bone_weights_offset,
1845        )?;
1846        self.write_i32_at(
1847            header_pos,
1848            skin_offsets::MDX_BONE_INDICES_OFFSET,
1849            skin.mdx_bone_indices_offset,
1850        )?;
1851
1852        // bone_node_numbers: 16 × u16 at +0x40.
1853        self.seek_to(header_pos, skin_offsets::BONE_NODE_NUMBERS);
1854        for &n in &skin.bone_node_numbers {
1855            write_u16(&mut self.buffer, n)?;
1856        }
1857
1858        // +0x60..+0x63: Padding - write zeros (vanilla has leaked pointers here
1859        // in ~74 models, but the engine never reads this field).
1860
1861        self.buffer.set_position(save);
1862        Ok(header_pos)
1863    }
1864
1865    /// Writes the skin data arrays and backpatches the skin header pointers.
1866    fn write_skin_arrays(
1867        &mut self,
1868        skin: &super::MdlSkin,
1869        skin_header_pos: u64,
1870    ) -> Result<(), MdlError> {
1871        // Write bonemap (+0x14 pointer, +0x18 count).
1872        let bonemap_count = count_u32(skin.bonemap.len(), "bonemap_count")?;
1873        let mut bonemap_ptr = 0u32;
1874        if bonemap_count > 0 {
1875            bonemap_ptr = self.content_position()?;
1876            for &idx in &skin.bonemap {
1877                write_u32(&mut self.buffer, idx)?;
1878            }
1879        }
1880
1881        // Write qbone_ref_inv (Quaternion = 4 × f32 each)
1882        let qbone_count = count_u32(skin.qbone_ref_inv.len(), "qbone_count")?;
1883        let mut qbone_ptr = 0u32;
1884        if qbone_count > 0 {
1885            qbone_ptr = self.content_position()?;
1886            for q in &skin.qbone_ref_inv {
1887                write_f32(&mut self.buffer, q[0])?;
1888                write_f32(&mut self.buffer, q[1])?;
1889                write_f32(&mut self.buffer, q[2])?;
1890                write_f32(&mut self.buffer, q[3])?;
1891            }
1892        }
1893
1894        // Write tbone_ref_inv (Vector = 3 × f32 each)
1895        let tbone_count = count_u32(skin.tbone_ref_inv.len(), "tbone_count")?;
1896        let mut tbone_ptr = 0u32;
1897        if tbone_count > 0 {
1898            tbone_ptr = self.content_position()?;
1899            for t in &skin.tbone_ref_inv {
1900                write_f32(&mut self.buffer, t[0])?;
1901                write_f32(&mut self.buffer, t[1])?;
1902                write_f32(&mut self.buffer, t[2])?;
1903            }
1904        }
1905
1906        // Write bone_constant_indices (i32 each)
1907        let bone_idx_count = count_u32(skin.bone_constant_indices.len(), "bone_idx_count")?;
1908        let mut bone_idx_ptr = 0u32;
1909        if bone_idx_count > 0 {
1910            bone_idx_ptr = self.content_position()?;
1911            for &idx in &skin.bone_constant_indices {
1912                write_i32(&mut self.buffer, idx)?;
1913            }
1914        }
1915
1916        // Backpatch CExoArrayList headers: ptr, count, alloc
1917        let save_end = self.buffer.position();
1918
1919        // Weights CExoArrayList at +0x00: always zeros in binary files (already zeroed from init).
1920
1921        // bonemap pointer/count at +0x14/+0x18
1922        self.seek_to(skin_header_pos, skin_offsets::BONEMAP_PTR);
1923        write_u32(&mut self.buffer, bonemap_ptr)?;
1924        write_u32(&mut self.buffer, bonemap_count)?;
1925
1926        // qbone_ref_inv at skin extra +0x1C
1927        self.seek_to(skin_header_pos, skin_offsets::QBONE_REF_INV_PTR);
1928        write_u32(&mut self.buffer, qbone_ptr)?;
1929        write_u32(&mut self.buffer, qbone_count)?;
1930        write_u32(&mut self.buffer, qbone_count)?; // alloc = count on disk
1931
1932        // tbone_ref_inv at skin extra +0x28
1933        self.seek_to(skin_header_pos, skin_offsets::TBONE_REF_INV_PTR);
1934        write_u32(&mut self.buffer, tbone_ptr)?;
1935        write_u32(&mut self.buffer, tbone_count)?;
1936        write_u32(&mut self.buffer, tbone_count)?; // alloc = count on disk
1937
1938        // bone_constant_indices at skin extra +0x34
1939        self.seek_to(skin_header_pos, skin_offsets::BONE_CONSTANT_INDICES_PTR);
1940        write_u32(&mut self.buffer, bone_idx_ptr)?;
1941        write_u32(&mut self.buffer, bone_idx_count)?;
1942        write_u32(&mut self.buffer, bone_idx_count)?; // alloc = count on disk
1943
1944        self.buffer.set_position(save_end);
1945        Ok(())
1946    }
1947
1948    /// Writes the 56-byte AnimMesh extra header from typed fields.
1949    ///
1950    /// Returns the header position for deferred backpatching of the two
1951    /// CExoArrayList pointers by [`write_anim_mesh_arrays`].
1952    fn write_anim_mesh_header(&mut self, anim: &super::MdlAnimMesh) -> Result<u64, MdlError> {
1953        let header_pos = self.buffer.position();
1954
1955        // Write 56 zero bytes as base, then backpatch typed fields.
1956        self.buffer.write_all(&[0u8; ANIM_MESH_EXTRA_SIZE])?;
1957        let save = self.buffer.position();
1958
1959        // sample_period at +0x00
1960        self.write_f32_at(
1961            header_pos,
1962            anim_mesh_offsets::SAMPLE_PERIOD,
1963            anim.sample_period,
1964        )?;
1965        // CExoArrayList pointers at +0x04 and +0x10 are backpatched by write_anim_mesh_arrays.
1966
1967        // Runtime-only fields at +0x1C..+0x37 (always zero in authored files,
1968        // preserved for roundtrip fidelity).
1969        self.write_u32_at(header_pos, anim_mesh_offsets::DATA_PTR_1, anim.data_ptr_1)?;
1970        self.write_u32_at(
1971            header_pos,
1972            anim_mesh_offsets::DATA_COUNT_1,
1973            anim.data_count_1,
1974        )?;
1975        self.write_u32_at(header_pos, anim_mesh_offsets::PADDING_24, anim.padding_24)?;
1976        self.write_u32_at(
1977            header_pos,
1978            anim_mesh_offsets::ANIM_VERTICES_PTR,
1979            anim.anim_vertices_ptr,
1980        )?;
1981        self.write_u32_at(
1982            header_pos,
1983            anim_mesh_offsets::ANIM_TEX_VERTICES_PTR,
1984            anim.anim_tex_vertices_ptr,
1985        )?;
1986        self.write_u32_at(
1987            header_pos,
1988            anim_mesh_offsets::ANIM_VERTICES_COUNT,
1989            anim.anim_vertices_count,
1990        )?;
1991        self.write_u32_at(
1992            header_pos,
1993            anim_mesh_offsets::ANIM_TEX_VERTICES_COUNT,
1994            anim.anim_tex_vertices_count,
1995        )?;
1996
1997        self.buffer.set_position(save);
1998        Ok(header_pos)
1999    }
2000
2001    /// Writes the two animated vertex arrays and backpatches the anim header pointers.
2002    fn write_anim_mesh_arrays(
2003        &mut self,
2004        anim: &super::MdlAnimMesh,
2005        anim_header_pos: u64,
2006    ) -> Result<(), MdlError> {
2007        // Write anim_verts (Vector = 3 × f32 each)
2008        let anim_verts_count = count_u32(anim.anim_verts.len(), "anim_verts_count")?;
2009        let mut anim_verts_ptr = 0u32;
2010        if anim_verts_count > 0 {
2011            anim_verts_ptr = self.content_position()?;
2012            for v in &anim.anim_verts {
2013                write_f32(&mut self.buffer, v[0])?;
2014                write_f32(&mut self.buffer, v[1])?;
2015                write_f32(&mut self.buffer, v[2])?;
2016            }
2017        }
2018
2019        // Write anim_t_verts (Vector = 3 × f32 each)
2020        let anim_t_verts_count = count_u32(anim.anim_t_verts.len(), "anim_t_verts_count")?;
2021        let mut anim_t_verts_ptr = 0u32;
2022        if anim_t_verts_count > 0 {
2023            anim_t_verts_ptr = self.content_position()?;
2024            for v in &anim.anim_t_verts {
2025                write_f32(&mut self.buffer, v[0])?;
2026                write_f32(&mut self.buffer, v[1])?;
2027                write_f32(&mut self.buffer, v[2])?;
2028            }
2029        }
2030
2031        // Backpatch CExoArrayList headers: ptr, count, alloc
2032        let save_end = self.buffer.position();
2033
2034        // anim_verts at anim extra +0x04
2035        self.seek_to(anim_header_pos, anim_mesh_offsets::ANIM_VERTS_PTR);
2036        write_u32(&mut self.buffer, anim_verts_ptr)?;
2037        write_u32(&mut self.buffer, anim_verts_count)?;
2038        write_u32(&mut self.buffer, anim_verts_count)?; // alloc = count on disk
2039
2040        // anim_t_verts at anim extra +0x10
2041        self.seek_to(anim_header_pos, anim_mesh_offsets::ANIM_T_VERTS_PTR);
2042        write_u32(&mut self.buffer, anim_t_verts_ptr)?;
2043        write_u32(&mut self.buffer, anim_t_verts_count)?;
2044        write_u32(&mut self.buffer, anim_t_verts_count)?; // alloc = count on disk
2045
2046        self.buffer.set_position(save_end);
2047        Ok(())
2048    }
2049
    /// Writes a node's controller key table and shared float-data array.
    ///
    /// Returns `(key_ptr, key_count, data_ptr, data_count)` for the caller to
    /// backpatch into the node header. Pointers are content-relative (the MDL
    /// wrapper size is subtracted; see `content_position`).
    ///
    /// Layout: all 16-byte key headers come first, immediately followed by a
    /// single flat f32 array holding, per controller, its key times and then
    /// its key values. Each key header stores u16 indices into that shared
    /// array, so a node is limited to 65,535 floats of controller data.
    fn write_controllers(
        &mut self,
        controllers: &[MdlController],
        orphan_data: &[f32],
    ) -> Result<(u32, u32, u32, u32), MdlError> {
        // Nothing at all: every header field stays zero.
        if controllers.is_empty() && orphan_data.is_empty() {
            return Ok((0, 0, 0, 0));
        }

        // Orphan data: key_count=0 but data_count>0. Write the data array
        // only so the engine sees the same data_ptr/data_count header values.
        if controllers.is_empty() {
            let data_ptr = self.content_position()?;
            for &val in orphan_data {
                write_f32(&mut self.buffer, val)?;
            }
            return Ok((
                0,
                0,
                data_ptr,
                count_u32(orphan_data.len(), "orphan_data_count")?,
            ));
        }

        let key_ptr = self.content_position()?;
        let key_count = count_u32(controllers.len(), "controller_count")?;

        // Reserve headers (16 zero bytes per controller, filled in below
        // once the time/data indices are known).
        let headers_start = self.buffer.position();
        self.buffer.write_all(&vec![
            0u8;
            usize::try_from(key_count)
                .expect("key_count fits in usize")
                * 16
        ])?;

        // Build flat data array: for each controller, all key times followed
        // by all key values, recording where each run starts.
        let mut master_float_data = Vec::new();
        let mut controller_indices = Vec::new(); // (time_idx, data_idx)

        for c in controllers {
            let time_idx = master_float_data.len();
            for key in &c.keys {
                master_float_data.push(key.time);
            }

            let data_idx = master_float_data.len();
            for key in &c.keys {
                master_float_data.extend_from_slice(&key.values);
            }

            controller_indices.push((time_idx, data_idx));
        }

        // Write Float Data
        // (the cursor already sits here after the zero-fill above; the
        // explicit seek just makes the layout arithmetic self-documenting).
        let data_start_pos = headers_start + u64::from(key_count) * 16;
        self.buffer.set_position(data_start_pos);

        // Convert the absolute buffer position to a content-relative pointer.
        let real_data_ptr = u32::try_from(
            data_start_pos
                .checked_sub(MDL_WRAPPER_SIZE)
                .ok_or(MdlError::ValueOverflow("ctrl_data_position underflow"))?,
        )
        .map_err(|_| MdlError::ValueOverflow("ctrl_data_position"))?;

        for val in &master_float_data {
            write_f32(&mut self.buffer, *val)?;
        }
        let real_data_count = count_u32(master_float_data.len(), "controller_data_count")?;

        // Fill Headers
        // Seek back over the reserved region and emit one 16-byte header per
        // controller, then restore the cursor to the end of the float data.
        let final_end = self.buffer.position();
        self.buffer.set_position(headers_start);

        for (i, c) in controllers.iter().enumerate() {
            let row_count = count_u16(c.keys.len(), "key_row_count")?;
            let (t_idx, d_idx) = controller_indices[i];

            write_u32(&mut self.buffer, c.controller_type.raw())?; // 0x00

            // 0x04: Unknown(2) + RowCount(2)
            self.buffer.write_all(&c.key_unknown_04)?;
            write_u16(&mut self.buffer, row_count)?;

            // 0x08: TimeIndex(2) + DataIndex(2)
            write_u16(
                &mut self.buffer,
                u16::try_from(t_idx).map_err(|_| MdlError::ValueOverflow("time_index"))?,
            )?;
            write_u16(
                &mut self.buffer,
                u16::try_from(d_idx).map_err(|_| MdlError::ValueOverflow("data_index"))?,
            )?;

            // 0x0C: ColumnCount(1) + Unknown(3)
            // Use the raw column_count byte (preserves Bezier flag and
            // integral orientation encoding) rather than deriving from
            // values.len(), which would lose the flag bits.
            self.buffer.write_all(&[c.raw_column_count])?;
            self.buffer.write_all(&c.key_unknown_0d)?;
        }

        self.buffer.set_position(final_end);

        Ok((key_ptr, key_count, real_data_ptr, real_data_count))
    }
2156
2157    /// Writes a Light node header (92 bytes) with placeholder CExoArray pointers.
2158    ///
2159    /// Returns the header start position for backpatching by `write_light_arrays`.
2160    /// The 5 CExoArrayList headers at +0x04..+0x3F are written as zeros here;
2161    /// the actual data pointers are backpatched after array payloads are emitted.
2162    fn write_light_header(&mut self, light: &super::MdlLight) -> Result<u64, MdlError> {
2163        let header_pos = self.buffer.position();
2164
2165        // Write 92 bytes of zeros as base, then backpatch typed fields.
2166        self.buffer.write_all(&[0u8; LIGHT_EXTRA_SIZE])?;
2167
2168        // flare_radius: f32 at +0x00
2169        self.write_f32_at(header_pos, light_offsets::FLARE_RADIUS, light.flare_radius)?;
2170
2171        // texture_safe_ptrs: 3×u32 at +0x04 (runtime-only, preserved for roundtrip)
2172        let sp_off = light_offsets::TEXTURE_SAFE_PTRS_PTR;
2173        self.write_u32_at(header_pos, sp_off, light.texture_safe_ptrs[0])?;
2174        self.write_u32_at(header_pos, sp_off + 4, light.texture_safe_ptrs[1])?;
2175        self.write_u32_at(header_pos, sp_off + 8, light.texture_safe_ptrs[2])?;
2176
2177        // CExoArrayList headers at +0x10..+0x3F left as zeros (backpatched in write_light_arrays)
2178
2179        // Scalar fields at +0x40..+0x5C
2180        self.write_i32_at(header_pos, light_offsets::PRIORITY, light.priority)?;
2181        self.write_i32_at(
2182            header_pos,
2183            light_offsets::NUM_DYNAMIC_TYPES,
2184            light.num_dynamic_types,
2185        )?;
2186        self.write_i32_at(
2187            header_pos,
2188            light_offsets::AFFECTDYNAMIC,
2189            light.affectdynamic,
2190        )?;
2191        self.write_i32_at(header_pos, light_offsets::SHADOW, light.shadow)?;
2192        self.write_i32_at(header_pos, light_offsets::AMBIENTONLY, light.ambientonly)?;
2193        self.write_i32_at(
2194            header_pos,
2195            light_offsets::GENERATEFLARE,
2196            light.generateflare,
2197        )?;
2198        self.write_i32_at(header_pos, light_offsets::FADING_LIGHT, light.fading_light)?;
2199
2200        Ok(header_pos)
2201    }
2202
2203    /// Writes Light flare data arrays and backpatches CExoArrayList pointers.
2204    ///
2205    /// Four CExoArrayList payloads are written:
2206    /// - flare_sizes: `Vec<f32>` (4 bytes each)
2207    /// - flare_positions: `Vec<f32>` (4 bytes each)
2208    /// - flare_color_shifts: `Vec<[f32; 3]>` (12 bytes each)
2209    /// - flare_texture_names: `Vec<String>` as pointer-to-pointer chain
2210    ///
2211    /// Each payload writes data, then backpatches the CExoArrayList header
2212    /// (ptr, count, alloc=count) in the light extra header.
2213    fn write_light_arrays(
2214        &mut self,
2215        light: &super::MdlLight,
2216        header_pos: u64,
2217    ) -> Result<(), MdlError> {
2218        // Helper: write a CExoArrayList header (ptr + count + alloc) into the light header.
2219        let write_cexo_header = |writer: &mut Self,
2220                                 ptr_offset: usize,
2221                                 data_ptr: u32,
2222                                 count: u32|
2223         -> Result<(), MdlError> {
2224            writer.write_u32_at(header_pos, ptr_offset, data_ptr)?;
2225            writer.write_u32_at(header_pos, ptr_offset + 4, count)?;
2226            // alloc = count (ptr_offset + 8)
2227            writer.write_u32_at(header_pos, ptr_offset + 8, count)?;
2228            Ok(())
2229        };
2230
2231        // Flare sizes: CExoArrayList<float> at +0x10
2232        let count = count_u32(light.flare_sizes.len(), "flare_sizes_count")?;
2233        if count > 0 {
2234            let data_ptr = self.content_position()?; // content-relative
2235            for &size in &light.flare_sizes {
2236                write_f32(&mut self.buffer, size)?;
2237            }
2238            write_cexo_header(self, light_offsets::FLARE_SIZES_PTR, data_ptr, count)?;
2239        }
2240
2241        // Flare positions: CExoArrayList<float> at +0x1C
2242        let count = count_u32(light.flare_positions.len(), "flare_positions_count")?;
2243        if count > 0 {
2244            let data_ptr = self.content_position()?; // content-relative
2245            for &pos in &light.flare_positions {
2246                write_f32(&mut self.buffer, pos)?;
2247            }
2248            write_cexo_header(self, light_offsets::FLARE_POSITIONS_PTR, data_ptr, count)?;
2249        }
2250
2251        // Flare color shifts: CExoArrayList<Vector> at +0x28
2252        let count = count_u32(light.flare_color_shifts.len(), "flare_color_shifts_count")?;
2253        if count > 0 {
2254            let data_ptr = self.content_position()?; // content-relative
2255            for shift in &light.flare_color_shifts {
2256                write_f32(&mut self.buffer, shift[0])?;
2257                write_f32(&mut self.buffer, shift[1])?;
2258                write_f32(&mut self.buffer, shift[2])?;
2259            }
2260            write_cexo_header(self, light_offsets::FLARE_COLOR_SHIFTS_PTR, data_ptr, count)?;
2261        }
2262
2263        // Flare texture names: CExoArrayList<char*> at +0x34
2264        // Each entry is a u32 content-relative pointer to a null-terminated string.
2265        let count = count_u32(light.flare_texture_names.len(), "flare_tex_names_count")?;
2266        if count > 0 {
2267            // Phase 1: Write the pointer array (u32 per name)
2268            let ptr_array_pos = self.buffer.position();
2269            let ptr_array_content_rel = self.content_position()?;
2270            // Write placeholder pointers
2271            for _ in 0..count {
2272                write_u32(&mut self.buffer, 0)?;
2273            }
2274
2275            // Phase 2: Write each string and backpatch its pointer
2276            for (i, name) in light.flare_texture_names.iter().enumerate() {
2277                let str_ptr = self.content_position()?; // content-relative
2278                                                        // Write null-terminated string
2279                self.buffer.write_all(name.as_bytes())?;
2280                self.buffer.write_all(&[0])?; // null terminator
2281
2282                // Backpatch the pointer
2283                let save = self.buffer.position();
2284                self.seek_to(ptr_array_pos, i * 4);
2285                write_u32(&mut self.buffer, str_ptr)?;
2286                self.buffer.set_position(save);
2287            }
2288
2289            write_cexo_header(
2290                self,
2291                light_offsets::FLARE_TEX_NAMES_PTR,
2292                ptr_array_content_rel,
2293                count,
2294            )?;
2295        }
2296
2297        Ok(())
2298    }
2299
2300    /// Writes a Reference node header (36 bytes).
2301    fn write_reference_header(&mut self, reference: &super::MdlReference) -> Result<(), MdlError> {
2302        // ref_model: char[32] at +0x00
2303        self.write_fixed_string(&reference.ref_model, 32)?;
2304        // reattachable: i32 at +0x20
2305        write_i32(&mut self.buffer, reference.reattachable)?;
2306        Ok(())
2307    }
2308
2309    /// Writes an Emitter node header (224 bytes).
2310    fn write_emitter_header(&mut self, emitter: &super::MdlEmitter) -> Result<(), MdlError> {
2311        // Scalars: +0x00..+0x20 (8 fields × 4 bytes = 32 bytes)
2312        write_f32(&mut self.buffer, emitter.deadspace)?;
2313        write_f32(&mut self.buffer, emitter.blast_radius)?;
2314        write_f32(&mut self.buffer, emitter.blast_length)?;
2315        write_i32(&mut self.buffer, emitter.num_branches)?;
2316        write_i32(&mut self.buffer, emitter.control_pt_smoothing)?;
2317        write_i32(&mut self.buffer, emitter.x_grid)?;
2318        write_i32(&mut self.buffer, emitter.y_grid)?;
2319        write_i32(&mut self.buffer, emitter.spawn_type)?;
2320
2321        // Strings: 4 × char[32] + 1 × char[16] = 144 bytes
2322        self.write_fixed_string(&emitter.update, 32)?; // +0x20
2323        self.write_fixed_string(&emitter.render, 32)?; // +0x40
2324        self.write_fixed_string(&emitter.blend, 32)?; // +0x60
2325        self.write_fixed_string(&emitter.texture, 32)?; // +0x80
2326        self.write_fixed_string(&emitter.chunk_name, 16)?; // +0xA0
2327
2328        // Trailing scalars: +0xB0..+0xBB (10 bytes)
2329        write_i32(&mut self.buffer, emitter.two_sided_tex)?;
2330        write_i32(&mut self.buffer, emitter.loop_emitter)?;
2331        write_u16(&mut self.buffer, emitter.render_order)?;
2332        self.buffer
2333            .write_all(&[if emitter.frame_blending { 1 } else { 0 }])?;
2334
2335        // depth_texture_name: char[16] at +0xBB
2336        self.write_fixed_string(&emitter.depth_texture_name, 16)?;
2337
2338        // Reserved: 21 bytes at +0xCB..+0xE0 - write verbatim
2339        self.buffer.write_all(&emitter.reserved)?;
2340
2341        Ok(())
2342    }
2343
2344    /// Writes the 28-byte DanglyMesh extra header (inline scalars only).
2345    ///
2346    /// Returns the header position for deferred backpatching of the constraint
2347    /// array pointer by [`write_dangly_arrays`].
2348    fn write_dangly_header(&mut self, dangly: &super::MdlDangly) -> Result<u64, MdlError> {
2349        let header_pos = self.buffer.position();
2350
2351        // Write 28 zero bytes as base, then fill typed fields.
2352        // CExoArrayList (ptr/count/alloc at +0x00) and data pointer (+0x18)
2353        // are backpatched by write_dangly_arrays and write_dangly_pre_mesh_payload.
2354        self.buffer.write_all(&[0u8; DANGLY_EXTRA_SIZE])?;
2355
2356        let save = self.buffer.position();
2357        self.seek_to(header_pos, dangly_offsets::DISPLACEMENT);
2358        write_f32(&mut self.buffer, dangly.displacement)?;
2359        write_f32(&mut self.buffer, dangly.tightness)?;
2360        write_f32(&mut self.buffer, dangly.period)?;
2361        self.buffer.set_position(save);
2362
2363        Ok(header_pos)
2364    }
2365
2366    /// Writes the dangly per-vertex positions (+0x18 pointer) before TriMesh arrays.
2367    ///
2368    /// Emits `vertex_count` vec3 positions (12 bytes each), then backpatches the
2369    /// content-relative pointer at dangly extra +0x18.
2370    fn write_dangly_pre_mesh_payload(
2371        &mut self,
2372        dangly: &super::MdlDangly,
2373        dangly_header_pos: u64,
2374    ) -> Result<(), MdlError> {
2375        let ptr = if !dangly.dangly_vertices.is_empty() {
2376            let p = self.content_position()?;
2377            for v in &dangly.dangly_vertices {
2378                write_f32(&mut self.buffer, v[0])?;
2379                write_f32(&mut self.buffer, v[1])?;
2380                write_f32(&mut self.buffer, v[2])?;
2381            }
2382            p
2383        } else {
2384            0
2385        };
2386
2387        self.write_u32_at(dangly_header_pos, dangly_offsets::DATA_PTR, ptr)?;
2388        Ok(())
2389    }
2390
2391    /// Writes the constraint float array and backpatches the dangly header pointer.
2392    fn write_dangly_arrays(
2393        &mut self,
2394        dangly: &super::MdlDangly,
2395        dangly_header_pos: u64,
2396    ) -> Result<(), MdlError> {
2397        let constraints_count = count_u32(dangly.constraints.len(), "constraints_count")?;
2398        let mut constraints_ptr = 0u32;
2399
2400        if constraints_count > 0 {
2401            constraints_ptr = self.content_position()?;
2402            for &val in &dangly.constraints {
2403                write_f32(&mut self.buffer, val)?;
2404            }
2405        }
2406
2407        // Backpatch CExoArrayList: ptr, count, alloc
2408        let save_end = self.buffer.position();
2409        self.seek_to(dangly_header_pos, dangly_offsets::CONSTRAINTS_PTR);
2410        write_u32(&mut self.buffer, constraints_ptr)?;
2411        write_u32(&mut self.buffer, constraints_count)?;
2412        write_u32(&mut self.buffer, constraints_count)?; // alloc = count on disk
2413        self.buffer.set_position(save_end);
2414
2415        Ok(())
2416    }
2417
2418    /// Writes the 4-byte AABB extra header (placeholder for tree root pointer).
2419    ///
2420    /// The root pointer is backpatched by [`write_aabb_arrays`] after the tree
2421    /// payload has been emitted.
2422    fn write_aabb_header(&mut self) -> Result<u64, MdlError> {
2423        let header_pos = self.buffer.position();
2424        self.buffer.write_all(&[0u8; AABB_EXTRA_SIZE])?;
2425        Ok(header_pos)
2426    }
2427
2428    /// Writes the AABB tree payload and backpatches the root pointer in the header.
2429    fn write_aabb_arrays(
2430        &mut self,
2431        aabb: &super::MdlAabb,
2432        aabb_header_pos: u64,
2433    ) -> Result<(), MdlError> {
2434        let ptr = if let Some(tree) = &aabb.aabb_tree {
2435            self.write_aabb_tree(tree)?
2436        } else {
2437            0
2438        };
2439
2440        let save = self.buffer.position();
2441        self.seek_to(aabb_header_pos, aabb_offsets::TREE_PTR);
2442        write_u32(&mut self.buffer, ptr)?;
2443        self.buffer.set_position(save);
2444        Ok(())
2445    }
2446
2447    /// Recursively writes an AABB tree node in DFS preorder (matching mdledit).
2448    ///
2449    /// Returns the content-relative offset of the written node. Child pointers
2450    /// are backpatched after each subtree is written.
2451    fn write_aabb_tree(&mut self, node: &super::AabbNode) -> Result<u32, MdlError> {
2452        let node_ptr = self.content_position()?; // content-relative
2453
2454        // Bounding box (24 bytes)
2455        for &v in &node.box_min {
2456            write_f32(&mut self.buffer, v)?;
2457        }
2458        for &v in &node.box_max {
2459            write_f32(&mut self.buffer, v)?;
2460        }
2461
2462        // Placeholder child pointers: right at +0x18, left at +0x1C
2463        let right_ph = self.buffer.position();
2464        write_u32(&mut self.buffer, 0)?;
2465        let left_ph = self.buffer.position();
2466        write_u32(&mut self.buffer, 0)?;
2467
2468        // Face index and split direction flags
2469        write_i32(&mut self.buffer, node.face_index)?;
2470        write_u32(&mut self.buffer, node.split_direction_flags)?;
2471
2472        // Recurse left child, backpatch pointer
2473        if let Some(left) = &node.left {
2474            let left_ptr = self.write_aabb_tree(left)?;
2475            let save = self.buffer.position();
2476            self.buffer.set_position(left_ph);
2477            write_u32(&mut self.buffer, left_ptr)?;
2478            self.buffer.set_position(save);
2479        }
2480
2481        // Recurse right child, backpatch pointer
2482        if let Some(right) = &node.right {
2483            let right_ptr = self.write_aabb_tree(right)?;
2484            let save = self.buffer.position();
2485            self.buffer.set_position(right_ph);
2486            write_u32(&mut self.buffer, right_ptr)?;
2487            self.buffer.set_position(save);
2488        }
2489
2490        Ok(node_ptr)
2491    }
2492
2493    /// Writes the 20-byte Saber extra header from typed fields.
2494    ///
2495    /// Data pointers are placeholders (backpatched by [`write_saber_arrays`]).
2496    /// GL pool IDs are preserved for roundtrip fidelity (runtime-only values).
2497    fn write_saber_header(&mut self, saber: &super::MdlSaber) -> Result<u64, MdlError> {
2498        let header_pos = self.buffer.position();
2499
2500        // Write 20 zero bytes as base, then backpatch GL pool IDs.
2501        self.buffer.write_all(&[0u8; SABER_EXTRA_SIZE])?;
2502        let save = self.buffer.position();
2503
2504        // Data pointers at +0x00, +0x04, +0x08 are backpatched by write_saber_arrays.
2505        self.write_u32_at(header_pos, saber_offsets::GL_POOL_VERT, saber.gl_pool_vert)?;
2506        self.write_u32_at(
2507            header_pos,
2508            saber_offsets::GL_POOL_INDEX,
2509            saber.gl_pool_index,
2510        )?;
2511
2512        self.buffer.set_position(save);
2513        Ok(header_pos)
2514    }
2515
2516    /// Writes the three saber vertex arrays and backpatches header pointers.
2517    fn write_saber_arrays(
2518        &mut self,
2519        saber: &super::MdlSaber,
2520        saber_header_pos: u64,
2521    ) -> Result<(), MdlError> {
2522        // Write saber_verts (vec3 × N)
2523        let verts_ptr = if !saber.saber_verts.is_empty() {
2524            let p = self.content_position()?;
2525            for v in &saber.saber_verts {
2526                write_f32(&mut self.buffer, v[0])?;
2527                write_f32(&mut self.buffer, v[1])?;
2528                write_f32(&mut self.buffer, v[2])?;
2529            }
2530            p
2531        } else {
2532            0
2533        };
2534
2535        // Write saber_uvs (vec2 × N)
2536        let uvs_ptr = if !saber.saber_uvs.is_empty() {
2537            let p = self.content_position()?;
2538            for v in &saber.saber_uvs {
2539                write_f32(&mut self.buffer, v[0])?;
2540                write_f32(&mut self.buffer, v[1])?;
2541            }
2542            p
2543        } else {
2544            0
2545        };
2546
2547        // Write saber_normals (vec3 × N)
2548        let normals_ptr = if !saber.saber_normals.is_empty() {
2549            let p = self.content_position()?;
2550            for v in &saber.saber_normals {
2551                write_f32(&mut self.buffer, v[0])?;
2552                write_f32(&mut self.buffer, v[1])?;
2553                write_f32(&mut self.buffer, v[2])?;
2554            }
2555            p
2556        } else {
2557            0
2558        };
2559
2560        // Backpatch data pointers in the saber header.
2561        self.write_u32_at(saber_header_pos, saber_offsets::VERTS_PTR, verts_ptr)?;
2562        self.write_u32_at(saber_header_pos, saber_offsets::UVS_PTR, uvs_ptr)?;
2563        self.write_u32_at(saber_header_pos, saber_offsets::NORMALS_PTR, normals_ptr)?;
2564
2565        Ok(())
2566    }
2567
    /// Writes a null-terminated string into a fixed-size field, zero-padded.
    ///
    /// Thin wrapper over the shared binary helper; padding/truncation rules
    /// live in `crate::binary::write_fixed_c_string`. `?` converts the
    /// helper's error into [`MdlError`].
    fn write_fixed_string(&mut self, s: &str, field_size: usize) -> Result<(), MdlError> {
        crate::binary::write_fixed_c_string(&mut self.buffer, s, field_size)?;
        Ok(())
    }
2573}