// ttmp-rs/src/mpd_encoder.rs

use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::{BufWriter, Read, Seek, SeekFrom, Write};
use flate2::Compression;
use flate2::write::DeflateEncoder;
use sha3::{Digest, Sha3_384};
use sqpack::{DatBlockHeader, DatStdFileBlockInfos, FileKind, LodBlock, ModelBlock, SqPackFileInfo, SqPackFileInfoHeader};
use sqpack::binrw::{self, BinWriterExt};
use crate::error::{Error, Result};
use crate::model::{ManifestKind, SimpleMod};
use crate::util::{MAX_MODEL_LODS, MAX_TEXTURE_LODS, read_struct};
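
/// Alignment, in bytes, applied to file headers and to each compressed block
/// written into the pack file.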
const ALIGN: usize = 128;
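
/// Encodes mod files into a single packed data file, writing SqPack-style entries and
/// recording where each manifest entry's data lives so the manifest can be updated on
/// [`finalize`](MpdEncoder::finalize). Identical file contents are detected by their
/// SHA3-384 hash and stored only once.
///
/// A rough usage sketch; the output path, `manifest`, `raw_size`, and `reader` are
/// placeholders supplied by the caller, not part of this module:
///
/// ```ignore
/// let out = File::create("pack.mpd")?;
/// let mut encoder = MpdEncoder::new(out, manifest);
/// encoder.add_standard_file(
///     FileInfo {
///         game_path: "some/game/path.mtrl".into(),
///         group: None,
///         option: None,
///     },
///     raw_size,
///     reader,
/// )?;
/// let (manifest, file) = encoder.finalize()?;
/// ```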
pub struct MpdEncoder {
    pub manifest: ManifestKind,
    pub writer: BufWriter<File>,
    hashes: HashMap<Vec<u8>, HashInfo>,
}
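
/// Identifies a manifest entry by its game path and, for option-based mod packs, the
/// group and option it belongs to.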
#[derive(Hash, Eq, PartialEq)]
pub struct FileInfo {
    pub game_path: String,
    pub group: Option<String>,
    pub option: Option<String>,
}
struct HashInfo {
    pub offset: usize,
    pub size: usize,
    pub files: HashSet<FileInfo>,
}

impl HashInfo {
    pub fn new(offset: usize, size: usize) -> Self {
        Self {
            offset,
            size,
            files: Default::default(),
        }
    }
}

impl MpdEncoder {
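    /// Maximum number of uncompressed bytes stored in a single compressed block.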
    const BLOCK_SIZE: usize = 16_000;
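
    /// Creates an encoder that writes packed data to `writer` for the entries described
    /// by `manifest`.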
    pub fn new(writer: File, manifest: ManifestKind) -> Self {
        Self {
            manifest,
            writer: BufWriter::new(writer),
            hashes: Default::default(),
        }
    }
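
    /// Flushes and truncates the output file, then fills in the offset and size of
    /// every mod entry in the manifest, returning the updated manifest along with the
    /// underlying [`File`].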
    pub fn finalize(mut self) -> Result<(ManifestKind, File)> {
        let pos = self.writer.stream_position().map_err(Error::Io)?;

        // potentially truncate if necessary
        let file = self.writer.into_inner().map_err(Error::BufWriterIntoInner)?;
        file.set_len(pos + 1).map_err(Error::Io)?;

        // update the manifest
        match &mut self.manifest {
            ManifestKind::V1(mods) => {
                Self::finalize_v1(&self.hashes, mods, None, None)?;
            }
            ManifestKind::V2(pack) => {
                if let Some(mods) = &mut pack.simple_mods_list {
                    Self::finalize_v1(&self.hashes, mods, None, None)?;
                }

                if let Some(pages) = &mut pack.mod_pack_pages {
                    for page in pages {
                        for group in &mut page.mod_groups {
                            for option in &mut group.option_list {
                                Self::finalize_v1(
                                    &self.hashes,
                                    &mut option.mods_jsons,
                                    Some(group.group_name.clone()),
                                    Some(option.name.clone()),
                                )?;
                            }
                        }
                    }
                }
            }
        }

        Ok((self.manifest, file))
    }
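
    /// Looks up each mod's data by the [`FileInfo`] it was added under and writes the
    /// resulting offset and size back into the [`SimpleMod`] entries.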
    fn finalize_v1(hashes: &HashMap<Vec<u8>, HashInfo>, mods: &mut [SimpleMod], group: Option<String>, option: Option<String>) -> Result<()> {
        for simple in mods {
            let file_info = FileInfo {
                game_path: simple.full_path.clone(),
                group: group.clone(),
                option: option.clone(),
            };

            let info = hashes.iter()
                .find(|(_, info)| info.files.contains(&file_info))
                .map(|(_, info)| info)
                .ok_or(Error::MissingHash)?;

            simple.mod_size = info.size;
            simple.mod_offset = info.offset;
        }

        Ok(())
    }
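
    /// Adds a texture (`.tex`) file, compressing each mip level into its own run of
    /// blocks described by a [`LodBlock`] entry in the file header.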
    pub fn add_texture_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read) -> Result<()> {
        #[derive(binrw::BinRead)]
        #[br(little)]
        struct RawTextureHeader {
            _attributes: u32,
            _format: u32,
            _width: u16,
            _height: u16,
            _depth: u16,
            mip_count: u16,
            _lod_offset: [u32; MAX_TEXTURE_LODS],
            offset_to_surface: [u32; 13],
        }

        const HEADER_SIZE: usize = std::mem::size_of::<RawTextureHeader>();

        let mut buf = [0; Self::BLOCK_SIZE];
        let mut hasher = Sha3_384::default();

        // read the texture file's header
        let header: RawTextureHeader = read_struct(&mut data, &mut buf)?;
        hasher.update(&buf[..HEADER_SIZE]);

        let before_header = self.writer.stream_position().map_err(Error::Io)?;

        // calculate the header size
        let mut sub_blocks_len = 0;
        for i in 0..header.mip_count {
            let offset = header.offset_to_surface[i as usize];
            let next = if i < 12 {
                header.offset_to_surface[(i + 1) as usize]
            } else {
                0
            };
            let mip_size = if next == 0 {
                size as u32 - offset
            } else {
                next - offset
            };
            sub_blocks_len += Self::calculate_blocks_required(mip_size as usize);
        }

        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<SqPackFileInfo>()
            + std::mem::size_of::<LodBlock>() * header.mip_count as usize
            + std::mem::size_of::<u16>() * sub_blocks_len;
        let header_align = ALIGN - (header_size % ALIGN);

        // make room for the header
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;

        let compressed_offset_base = self.writer.stream_position().map_err(Error::Io)?;

        // write the raw texture file's header back
        self.writer.write_all(&buf[..HEADER_SIZE]).map_err(Error::Io)?;

        let initial_pos = self.writer.stream_position().map_err(Error::Io)?;
        let mut lod_blocks = Vec::with_capacity(header.mip_count as usize);
        let mut sub_blocks = Vec::with_capacity(header.mip_count as usize);
        let mut total_blocks = 0;

        for i in 0..header.mip_count {
            let before_this_mip = self.writer.stream_position().map_err(Error::Io)?;
            let compressed_offset = before_this_mip - compressed_offset_base;

            let offset = header.offset_to_surface[i as usize];
            let next = if i < 12 {
                header.offset_to_surface[(i + 1) as usize]
            } else {
                0
            };

            let infos = if next == 0 {
                // read to eof
                self.write_blocks(&mut data, &mut hasher)?
            } else {
                let read = (&mut data).take((next - offset) as u64);
                self.write_blocks(read, &mut hasher)?
            };

            let compressed_size = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>()
                + (std::mem::size_of::<DatStdFileBlockInfos>() * infos.len()) as u32;
            let decompressed_size = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();

            lod_blocks.push(LodBlock {
                compressed_offset: compressed_offset as u32,
                compressed_size,
                decompressed_size,
                block_offset: total_blocks,
                block_count: infos.len() as u32,
            });

            total_blocks += infos.len() as u32;
            sub_blocks.extend(infos);
        }

        assert_eq!(sub_blocks_len, sub_blocks.len());

        let after_data = self.writer.stream_position().map_err(Error::Io)?;
        let data_len = after_data - initial_pos;

        // seek before all the data
        self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;

        let hash = hasher.finalize();
        // check if we already have a file inserted with this hash
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| {
                let total_size = header_size + header_align + data_len as usize;
                HashInfo::new(before_header as usize, total_size)
            })
            .files
            .insert(file_info);
        if contained {
            return Ok(());
        }

        // write the headers
        self.writer.write_le(&SqPackFileInfoHeader {
            kind: FileKind::Texture,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        }).map_err(Error::BinRwWrite)?;

        self.writer.write_le(&SqPackFileInfo {
            _unk_0: [0, 0], // FIXME
            number_of_blocks: lod_blocks.len() as u32,
        }).map_err(Error::BinRwWrite)?;

        // write the lod blocks out
        for block in &lod_blocks {
            self.writer.write_le(block).map_err(Error::BinRwWrite)?;
        }

        // write the sizes of each sub-block
        for info in sub_blocks {
            self.writer.write_le(&info.compressed_size).map_err(Error::BinRwWrite)?;
        }

        // seek past the data
        self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

        Ok(())
    }
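
    /// Returns how many padding bytes to add after `size` bytes to reach the next
    /// [`ALIGN`] boundary.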
    fn alignment_necessary(size: usize) -> usize {
        ALIGN - (size % ALIGN)
    }
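
    /// Adds a model (`.mdl`) file, writing its stack, runtime, vertex buffer, and index
    /// buffer sections as compressed blocks and filling out a [`ModelBlock`] header.
    /// Models with edge geometry enabled are rejected.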
    pub fn add_model_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read) -> Result<()> {
        #[derive(binrw::BinRead)]
        #[br(little)]
        struct RawModelHeader {
            version: u32,
            stack_size: u32,
            runtime_size: u32,
            vertex_declaration_count: u16,
            material_count: u16,
            vertex_offset: [u32; MAX_MODEL_LODS],
            index_offset: [u32; MAX_MODEL_LODS],
            vertex_buffer_size: [u32; MAX_MODEL_LODS],
            index_buffer_size: [u32; MAX_MODEL_LODS],
            lod_count: u8,
            enable_index_buffer_streaming: u8,
            enable_edge_geometry: u8,
            padding: u8,
        }

        let mut buf = [0; Self::BLOCK_SIZE];
        let mut hasher = Sha3_384::default();

        // read the model file's header
        let header: RawModelHeader = read_struct(&mut data, &mut buf)?;
        if header.enable_edge_geometry > 0 {
            return Err(Error::EdgeGeometry);
        }

        // save the start of the mdl file for later (or maybe this just isn't present in the sqpack)
        let mdl_header_bytes = buf[..std::mem::size_of::<RawModelHeader>()].to_vec();
        hasher.update(&mdl_header_bytes);

        let mut sqpack_header = ModelBlock {
            version: header.version,
            vertex_declaration_num: header.vertex_declaration_count,
            material_num: header.material_count,
            num_lods: header.lod_count,
            index_buffer_streaming_enabled: header.enable_index_buffer_streaming,
            edge_geometry_enabled: header.enable_edge_geometry,
            padding: header.padding,
            ..Default::default()
        };

        let before_header = self.writer.stream_position().map_err(Error::Io)?;

        // calculate size of header
        let num_blocks: usize = Self::calculate_blocks_required(header.stack_size as usize)
            + Self::calculate_blocks_required(header.runtime_size as usize)
            + header.index_buffer_size
                .iter()
                .take(header.lod_count as usize)
                .map(|&size| Self::calculate_blocks_required(size as usize))
                .sum::<usize>()
            + header.vertex_buffer_size
                .iter()
                .take(header.lod_count as usize)
                .map(|&size| Self::calculate_blocks_required(size as usize))
                .sum::<usize>();
        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<ModelBlock>()
            + num_blocks * std::mem::size_of::<u16>();
        let header_align = ALIGN - (header_size % ALIGN);

        // make room for header
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;

        let offset_base = self.writer.stream_position().map_err(Error::Io)?;
        let mut block_index: u16 = 0;
        let mut block_sizes = Vec::with_capacity(num_blocks);

        let infos = self.write_blocks((&mut data).take(header.stack_size as u64), &mut hasher)?;
        sqpack_header.block_num.stack = infos.len() as u16;
        sqpack_header.block_index.stack = 0; // stack will always be block index 0
        sqpack_header.offset.stack = 0; // stack is always offset 0
        sqpack_header.size.stack = infos.iter()
            .map(|info| info.uncompressed_size as u32)
            .sum();
        sqpack_header.size.stack += Self::alignment_necessary(sqpack_header.size.stack as usize) as u32;
        sqpack_header.compressed_size.stack = infos.iter()
            .map(|info| info.compressed_size as u32)
            .sum::<u32>(); // + padding as u32;
        block_index += infos.len() as u16;
        block_sizes.extend(infos.iter().map(|x| x.compressed_size));

        // set next section's block index and offset
        sqpack_header.block_index.runtime = block_index;
        sqpack_header.offset.runtime = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;

        let infos = self.write_blocks((&mut data).take(header.runtime_size as u64), &mut hasher)?;
        sqpack_header.block_num.runtime = infos.len() as u16;
        sqpack_header.size.runtime = infos.iter()
            .map(|info| info.uncompressed_size as u32)
            .sum();
        sqpack_header.size.runtime += Self::alignment_necessary(sqpack_header.size.runtime as usize) as u32;
        sqpack_header.compressed_size.runtime = infos.iter()
            .map(|info| info.compressed_size as u32)
            .sum::<u32>(); // + padding as u32;
        block_index += infos.len() as u16;
        block_sizes.extend(infos.iter().map(|x| x.compressed_size));

        // ASSUMING MDL IS LAID OUT [(VERTEX, INDEX); 3]
        for lod in 0..MAX_MODEL_LODS {
            sqpack_header.offset.vertex_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
            let infos = self.write_lod(lod, header.lod_count, &header.vertex_offset, &header.vertex_buffer_size, &mut data, &mut hasher)?;
            sqpack_header.block_index.vertex_buffer[lod] = block_index;
            sqpack_header.block_num.vertex_buffer[lod] = infos.len() as u16;
            sqpack_header.size.vertex_buffer[lod] = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();
            sqpack_header.size.vertex_buffer[lod] += Self::alignment_necessary(sqpack_header.size.vertex_buffer[lod] as usize) as u32;
            sqpack_header.compressed_size.vertex_buffer[lod] = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>(); // + padding as u32;
            block_index += infos.len() as u16;
            block_sizes.extend(infos.iter().map(|x| x.compressed_size));

            sqpack_header.offset.index_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
            let infos = self.write_lod(lod, header.lod_count, &header.index_offset, &header.index_buffer_size, &mut data, &mut hasher)?;
            sqpack_header.block_index.index_buffer[lod] = block_index;
            sqpack_header.block_num.index_buffer[lod] = infos.len() as u16;
            sqpack_header.size.index_buffer[lod] = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();
            sqpack_header.size.index_buffer[lod] += Self::alignment_necessary(sqpack_header.size.index_buffer[lod] as usize) as u32;
            sqpack_header.compressed_size.index_buffer[lod] = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>(); // + padding as u32;
            block_index += infos.len() as u16;
            block_sizes.extend(infos.iter().map(|x| x.compressed_size));
        }

        // ensure our math was correct
        assert_eq!(num_blocks, block_sizes.len());

        sqpack_header.number_of_blocks = block_index as u32;
        sqpack_header.used_number_of_blocks = block_index as u32;

        // store how long all this data we just wrote is
        let after_data = self.writer.stream_position().map_err(Error::Io)?;
        let data_len = after_data - offset_base;

        // now seek back to before we wrote all this data
        self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;

        // check if we already have a file inserted with this hash
        let hash = hasher.finalize();
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| {
                let total_size = header_size + header_align + data_len as usize;
                HashInfo::new(before_header as usize, total_size)
            })
            .files
            .insert(file_info);
        if contained {
            return Ok(());
        }

        // write the file header
        let file_header = SqPackFileInfoHeader {
            kind: FileKind::Model,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        };
        self.writer.write_le(&file_header).map_err(Error::BinRwWrite)?;

        // write the model header
        self.writer.write_le(&sqpack_header).map_err(Error::BinRwWrite)?;

        // write out all the block sizes, in order, as u16s
        for size in block_sizes {
            self.writer.write_le(&size).map_err(Error::BinRwWrite)?;
        }

        // now seek past the data
        self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

        Ok(())
    }
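
    /// Compresses the buffer for a single LOD and returns its block info; LODs beyond
    /// `lod_count` produce no blocks.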
    fn write_lod(&mut self, lod: usize, lod_count: u8, offsets: &[u32], sizes: &[u32], mut data: impl Read, hasher: &mut impl Digest) -> Result<Vec<DatStdFileBlockInfos>> {
        // only write out the lods we have
        if lod_count == 0 || lod > lod_count as usize - 1 {
            return Ok(Default::default());
        }

        let _offset = offsets[lod];
        let size = sizes[lod];

        let read = (&mut data).take(size as u64);
        let infos = self.write_blocks(read, hasher)?;
        // let padding = self.align_to(ALIGN)?;

        Ok(infos)
    }
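
    /// Returns how many [`Self::BLOCK_SIZE`]-byte blocks are needed to hold `size` bytes
    /// of uncompressed data.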
    fn calculate_blocks_required(size: usize) -> usize {
        if size == 0 {
            return 0;
        }

        if size <= Self::BLOCK_SIZE {
            return 1;
        }

        let mut num_blocks = size / Self::BLOCK_SIZE;
        if size > Self::BLOCK_SIZE * num_blocks {
            num_blocks += 1;
        }

        num_blocks
    }
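
    /// Adds a standard (non-texture, non-model) file, compressing its contents into a
    /// run of standard blocks. Files whose contents were already added are pointed at
    /// the existing data instead of being written again.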
    pub fn add_standard_file(&mut self, file_info: FileInfo, size: usize, data: impl Read) -> Result<()> {
        // store position before doing anything
        let pos = self.writer.stream_position().map_err(Error::Io)?;

        // calculate the number of blocks
        let num_blocks = Self::calculate_blocks_required(size);

        // calculate the header size, then write zeroes where it will be
        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<SqPackFileInfo>()
            + std::mem::size_of::<DatStdFileBlockInfos>() * num_blocks;
        let header_align = ALIGN - (header_size % ALIGN);
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;

        let after_header = self.writer.stream_position().map_err(Error::Io)?;

        // write the data
        let mut hasher = Sha3_384::default();
        let infos = self.write_blocks(data, &mut hasher)?;
        let hash = hasher.finalize();

        // ensure we did our math correctly
        assert_eq!(num_blocks, infos.len());

        let after_blocks = self.writer.stream_position().map_err(Error::Io)?;
        let blocks_size = after_blocks - after_header;

        // check if we already have a file inserted with this hash
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| HashInfo::new(pos as usize, header_size + header_align + blocks_size as usize))
            .files
            .insert(file_info);

        // seek back to before chunks to add headers
        self.writer.seek(SeekFrom::Start(pos)).map_err(Error::Io)?;

        if contained {
            return Ok(());
        }

        // add headers
        self.writer.write_le(&SqPackFileInfoHeader {
            kind: FileKind::Standard,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        }).map_err(Error::BinRwWrite)?;
        self.writer.write_le(&SqPackFileInfo {
            _unk_0: [0, 0], // FIXME: seen [0, 0], [2, 2], [3, 3], and [4, 4]
            number_of_blocks: infos.len() as u32,
        }).map_err(Error::BinRwWrite)?;

        for info in &infos {
            self.writer.write_le(info).map_err(Error::BinRwWrite)?;
        }

        // seek past data
        self.writer.seek(SeekFrom::Start(after_blocks)).map_err(Error::Io)?;

        Ok(())
    }
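
    /// Pads the writer with zeroes so its position is a multiple of `n`, returning how
    /// many padding bytes were written.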
    fn align_to(&mut self, n: usize) -> Result<usize> {
        let current_pos = self.writer.stream_position().map_err(Error::Io)? as usize;
        // no padding is needed when the position is already aligned
        let bytes_to_pad = (n - (current_pos % n)) % n;
        if bytes_to_pad > 0 {
            // write padding bytes
            self.writer.write_all(&vec![0; bytes_to_pad]).map_err(Error::Io)?;
        }

        Ok(bytes_to_pad)
    }
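
    /// Reads `data` to the end, compressing it into [`Self::BLOCK_SIZE`]-byte blocks
    /// (each prefixed with a [`DatBlockHeader`] and padded to [`ALIGN`] bytes) while
    /// feeding the uncompressed bytes into `hasher`. Returns one
    /// [`DatStdFileBlockInfos`] per block written.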
    fn write_blocks(&mut self, mut data: impl Read, hasher: &mut impl Digest) -> Result<Vec<DatStdFileBlockInfos>> {
        let mut total_written = 0;
        let mut infos = Vec::new();

        // read 16kb chunks and compress them
        let mut buf = [0; Self::BLOCK_SIZE];
        let mut buf_idx: usize = 0;
        'outer: loop {
            // read up to 16kb from the data stream
            loop {
                let size = data.read(&mut buf[buf_idx..]).map_err(Error::Io)?;
                if size == 0 {
                    // end of file
                    if buf_idx == 0 {
                        break 'outer;
                    }
                    break;
                }
                buf_idx += size;
            }

            // update hasher
            hasher.update(&buf[..buf_idx]);

            let offset = total_written;

            // get position before chunk
            let before_header = self.writer.stream_position().map_err(Error::Io)?;
            // make space for chunk header
            self.writer.write_all(&vec![0; std::mem::size_of::<DatBlockHeader>()]).map_err(Error::Io)?;
            total_written += std::mem::size_of::<DatBlockHeader>() as u64;

            // write compressed chunk to writer
            let mut encoder = DeflateEncoder::new(&mut self.writer, Compression::best());
            encoder.write_all(&buf[..buf_idx]).map_err(Error::Io)?;
            encoder.finish().map_err(Error::Io)?;

            // calculate the size of compressed data
            let after_data = self.writer.stream_position().map_err(Error::Io)?;
            let mut compressed_size = after_data - before_header;
            total_written += compressed_size;

            // seek back to before header
            self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;
            // write chunk header
            let header = DatBlockHeader {
                size: std::mem::size_of::<DatBlockHeader>() as u32,
                uncompressed_size: buf_idx as u32,
                compressed_size: (compressed_size - std::mem::size_of::<DatBlockHeader>() as u64) as u32,
                _unk_0: 0,
            };
            self.writer.write_le(&header).map_err(Error::BinRwWrite)?;
            // seek past chunk
            self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

            // pad to 128 bytes
            let padded = self.align_to(ALIGN)?;
            // add padding bytes to the compressed size because that's just
            // how sqpack do
            compressed_size += padded as u64;
            // total_written += padded as u64;

            infos.push(DatStdFileBlockInfos {
                offset: offset as u32,
                uncompressed_size: buf_idx as u16,
                compressed_size: compressed_size as u16,
            });

            // end of file was reached
            if buf_idx < Self::BLOCK_SIZE {
                break;
            }

            buf_idx = 0;
        }

        Ok(infos)
    }
}