feat: add mpd encoder

Anna, 2022-11-30 19:09:26 -05:00
parent a06360de19
commit 1a3939163a
Signed by: anna (GPG Key ID: 0B391D8F06FCD9E0)
14 changed files with 806 additions and 49 deletions


@@ -10,9 +10,10 @@ autoexamples = true
flate2 = "1"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
sha3 = "0.10"
thiserror = "1"
zip = { version = "0.6", default-features = false, features = ["deflate"] }
-sqpack = { git = "https://git.anna.lgbt/ascclemens/sqpack-rs", features = ["read"] }
+sqpack = { git = "https://git.anna.lgbt/ascclemens/sqpack-rs", features = ["read", "write"] }
[dev-dependencies]
criterion = "0.4"


@@ -1,11 +1,11 @@
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
use std::fs::File;
-use std::io::{Cursor, Seek, SeekFrom, Write};
+use std::io::{Seek, SeekFrom};
use std::path::Path;
use sha3::{Digest, Sha3_256};
-use ttmp::ttmp_extractor::{ModFile, TtmpExtractor};
+use ttmp::ttmp_extractor::TtmpExtractor;
pub fn main() {
let mut sha = Sha3_256::default();
@@ -16,20 +16,39 @@ pub fn main() {
let files = extractor.all_files_sorted();
let mut data_file = zip.by_name("TTMPD.mpd").unwrap();
// let mut data = Vec::new();
// data_file.read_to_end(&mut data).unwrap();
// let mut cursor = Cursor::new(data);
std::fs::create_dir_all("files").unwrap();
let mut hashes: HashMap<String, Vec<SavedFile>> = HashMap::with_capacity(files.len());
let mut temp = tempfile::tempfile().unwrap();
let mut last_offset = None;
for file in files {
// handle deduped ttmps
if Some(file.file.mod_offset) == last_offset {
println!("already seen offset {}", file.file.mod_offset);
continue;
}
last_offset = Some(file.file.mod_offset);
temp.set_len(0).unwrap();
temp.seek(SeekFrom::Start(0)).unwrap();
println!("{:#?}", file);
// write each file into a temp file, then hash
// mod files can get quite large, so storing them entirely in memory is probably a bad idea
// let mut cursor = Cursor::new(Vec::with_capacity(file.file.mod_size));
// let before = cursor.position();
// println!("before: {}", before);
TtmpExtractor::extract_one_into(&file, &mut data_file, &mut temp).unwrap();
// let after = cursor.position();
// println!("after: {}", after);
// println!("size: {}", after - before);
// let data = cursor.into_inner();
// sha.update(&data);
temp.seek(SeekFrom::Start(0)).unwrap();
@@ -39,9 +58,6 @@ pub fn main() {
let hash = hex::encode(&*hash);
let new = !hashes.contains_key(&hash);
let saved = SavedFile {
-author: extractor.manifest().author.clone(),
-package: extractor.manifest().name.clone(),
-package_version: extractor.manifest().version.clone(),
game_path: file.file.full_path.clone(),
group: file.group.map(ToOwned::to_owned),
option: file.option.map(ToOwned::to_owned),
@@ -50,9 +66,8 @@ pub fn main() {
if new {
let path = Path::new("files").join(&hash);
std::io::copy(&mut temp, &mut File::create(&path).unwrap()).unwrap();
// std::fs::write(&path, data).unwrap();
println!("writing {}", path.to_string_lossy());
std::io::copy(&mut temp, &mut File::create(&path).unwrap()).unwrap();
}
}
@@ -61,9 +76,6 @@ pub fn main() {
#[derive(Debug)]
pub struct SavedFile {
-pub author: String,
-pub package: String,
-pub package_version: String,
pub game_path: String,
pub group: Option<String>,
pub option: Option<String>,

examples/repack.rs (new file, 74 lines)

@@ -0,0 +1,74 @@
use std::fs::{File, OpenOptions};
use std::io::{BufReader, BufWriter, Cursor, Seek, SeekFrom, Write};
use zip::{CompressionMethod, ZipWriter};
use zip::write::FileOptions;
use ttmp::model::ManifestKind;
use ttmp::mpd_encoder::{FileInfo, MpdEncoder};
use ttmp::ttmp_extractor::TtmpExtractor;
fn main() {
let args: Vec<String> = std::env::args().skip(1).collect();
let ttmp_path = &args[0];
let file = BufReader::new(File::open(ttmp_path).unwrap());
let extractor = TtmpExtractor::new(file).unwrap();
let mut zip = extractor.zip().borrow_mut();
let mut data_file = zip.by_name("TTMPD.mpd").unwrap();
let temp_mpd = OpenOptions::new()
.create(true)
.write(true)
.read(true)
.open("temp.mpd")
.unwrap();
let mut encoder = MpdEncoder::new(temp_mpd, extractor.manifest().clone());
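// the encoder writes SqPack-style entries to temp.mpd as files are added; the
// offsets and sizes it records per file are patched into the manifest by
// finalize() below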
for file in extractor.all_files_sorted() {
let mut data = Cursor::new(Vec::new());
println!("extracting {}", file.file.full_path);
TtmpExtractor::extract_one_into(&file, &mut data_file, &mut data).unwrap();
data.set_position(0);
let file_info = FileInfo {
group: file.group.map(ToOwned::to_owned),
option: file.option.map(ToOwned::to_owned),
game_path: file.file.full_path.clone(),
};
println!("adding {}", file.file.full_path);
if file.file.full_path.ends_with(".mdl") {
encoder.add_model_file(file_info, data.get_ref().len(), &mut data).unwrap();
} else if file.file.full_path.ends_with(".atex") || file.file.full_path.ends_with(".tex") {
encoder.add_texture_file(file_info, data.get_ref().len(), &mut data).unwrap();
} else {
encoder.add_standard_file(file_info, data.get_ref().len(), &mut data).unwrap();
}
}
let (manifest, mut file) = encoder.finalize().unwrap();
file.seek(SeekFrom::Start(0)).unwrap();
let repacked = BufWriter::new(File::create("repacked.ttmp2").unwrap());
let mut zip = ZipWriter::new(repacked);
zip.start_file("TTMPD.mpd", FileOptions::default().compression_method(CompressionMethod::Stored)).unwrap();
std::io::copy(&mut file, &mut zip).unwrap();
zip.start_file("TTMPL.mpl", FileOptions::default().compression_method(CompressionMethod::Deflated)).unwrap();
match manifest {
ManifestKind::V1(mods) => {
for mod_ in mods {
serde_json::to_writer(&mut zip, &mod_).unwrap();
zip.write_all(b"\n").unwrap();
}
}
ManifestKind::V2(pack) => {
serde_json::to_writer(&mut zip, &pack).unwrap();
}
}
zip.finish().unwrap();
}
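Usage note: since the crate sets autoexamples = true, this example can presumably be run as: cargo run --example repack -- path/to/mod.ttmp2 (it leaves temp.mpd and repacked.ttmp2 in the working directory).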


@@ -1,3 +1,5 @@
+use std::fs::File;
+use std::io::BufWriter;
use thiserror::Error;
pub type Result<T, E = Error> = std::result::Result<T, E>;
@@ -16,4 +18,12 @@ pub enum Error {
SqPackError(#[from] sqpack::binrw::Error),
#[error("error writing to output")]
BinRwWrite(sqpack::binrw::Error),
#[error("error parsing input")]
BinRwRead(sqpack::binrw::Error),
#[error("model files with edge geometry are not supported")]
EdgeGeometry,
#[error("a hash was missing during encoding - this is a bug")]
MissingHash,
#[error("error finalising writer")]
BufWriterIntoInner(#[from] std::io::IntoInnerError<BufWriter<File>>),
}


@@ -2,7 +2,7 @@ use std::io::{Read, Seek};
use serde::de::Error as _;
use serde::Deserialize;
-use serde_json::{Deserializer, StreamDeserializer};
+use serde_json::StreamDeserializer;
use serde_json::de::IoRead;
pub use zip::{read::ZipFile, ZipArchive};
@@ -14,6 +16,8 @@ pub mod model;
pub mod error;
pub(crate) mod tracking_reader;
pub mod ttmp_extractor;
+pub mod mpd_encoder;
+pub(crate) mod util;
pub fn from_value(value: serde_json::Value) -> Result<ManifestKind> {
let manifest = if value.is_array() {


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::{ModPack, SimpleMod};
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ManifestKind {
V1(Vec<SimpleMod>),


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::{ModOption, SelectionType};
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct ModGroup {
pub group_name: String,


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::{SelectionType, SimpleMod};
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct ModOption {
pub name: String,


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::{ModPackPage, SimpleMod};
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct ModPack {
pub minimum_framework_version: Option<String>,


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::ModGroup;
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct ModPackPage {
pub page_index: i32,


@@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize};
use crate::model::ModPack;
-#[derive(Debug, Deserialize, Serialize)]
+#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct SimpleMod {
pub name: String,

src/mpd_encoder.rs (new file, 650 lines)

@@ -0,0 +1,650 @@
use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::{BufWriter, Read, Seek, SeekFrom, Write};
use flate2::Compression;
use flate2::write::DeflateEncoder;
use sha3::{Digest, Sha3_384};
use sqpack::{DatBlockHeader, DatStdFileBlockInfos, FileKind, LodBlock, ModelBlock, SqPackFileInfo, SqPackFileInfoHeader};
use sqpack::binrw::{self, BinWriterExt};
use crate::error::{Error, Result};
use crate::model::{ManifestKind, SimpleMod};
use crate::util::{MAX_MODEL_LODS, MAX_TEXTURE_LODS, read_struct};
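// assumption: following SqPack .dat conventions, entries and blocks written
// here are padded out to 128-byte boundaries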
const ALIGN: usize = 128;
pub struct MpdEncoder {
pub manifest: ManifestKind,
pub writer: BufWriter<File>,
hashes: HashMap<Vec<u8>, HashInfo>,
}
#[derive(Hash, Eq, PartialEq)]
pub struct FileInfo {
pub game_path: String,
pub group: Option<String>,
pub option: Option<String>,
}
struct HashInfo {
pub offset: usize,
pub size: usize,
pub files: HashSet<FileInfo>,
}
impl HashInfo {
pub fn new(offset: usize, size: usize) -> Self {
Self {
offset,
size,
files: Default::default(),
}
}
}
impl MpdEncoder {
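// file data is chunked and DEFLATE-compressed in blocks of at most 16,000
// bytes each (note: 16,000 exactly, not 16 KiB)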
const BLOCK_SIZE: usize = 16_000;
pub fn new(writer: File, manifest: ManifestKind) -> Self {
Self {
manifest,
writer: BufWriter::new(writer),
hashes: Default::default(),
}
}
pub fn finalize(mut self) -> Result<(ManifestKind, File)> {
let pos = self.writer.stream_position().map_err(Error::Io)?;
// potentially truncate if necessary
let file = self.writer.into_inner().map_err(Error::BufWriterIntoInner)?;
file.set_len(pos).map_err(Error::Io)?;
// update the manifest
match &mut self.manifest {
ManifestKind::V1(mods) => {
Self::finalize_v1(&self.hashes, mods, None, None)?;
}
ManifestKind::V2(pack) => {
if let Some(mods) = &mut pack.simple_mods_list {
Self::finalize_v1(&self.hashes, mods, None, None)?;
}
if let Some(pages) = &mut pack.mod_pack_pages {
for page in pages {
for group in &mut page.mod_groups {
for option in &mut group.option_list {
Self::finalize_v1(
&self.hashes,
&mut option.mods_jsons,
Some(group.group_name.clone()),
Some(option.name.clone()),
)?;
}
}
}
}
}
}
Ok((self.manifest, file))
}
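// patch each SimpleMod's mod_offset/mod_size by finding the hash entry whose
// file set contains the mod's (game path, group, option) triple; this is a
// linear scan over the hash map for every mod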
fn finalize_v1(hashes: &HashMap<Vec<u8>, HashInfo>, mods: &mut [SimpleMod], group: Option<String>, option: Option<String>) -> Result<()> {
for simple in mods {
let file_info = FileInfo {
game_path: simple.full_path.clone(),
group: group.clone(),
option: option.clone(),
};
let info = hashes.iter()
.find(|(_, info)| info.files.contains(&file_info))
.map(|(_, info)| info)
.ok_or(Error::MissingHash)?;
simple.mod_size = info.size;
simple.mod_offset = info.offset;
}
Ok(())
}
pub fn add_texture_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read) -> Result<()> {
#[derive(binrw::BinRead)]
#[br(little)]
struct RawTextureHeader {
_attributes: u32,
_format: u32,
_width: u16,
_height: u16,
_depth: u16,
mip_count: u16,
_lod_offset: [u32; MAX_TEXTURE_LODS],
offset_to_surface: [u32; 13],
}
const HEADER_SIZE: usize = std::mem::size_of::<RawTextureHeader>();
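// assumption: size_of on this repr(Rust) struct matches the 80-byte on-disk
// .tex header layout, since it is used as the raw header length below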
let mut buf = [0; Self::BLOCK_SIZE];
let mut hasher = Sha3_384::default();
// read the texture file's header
let header: RawTextureHeader = read_struct(&mut data, &mut buf)?;
hasher.update(&buf[..HEADER_SIZE]);
let before_header = self.writer.stream_position().map_err(Error::Io)?;
// calculate the header size
let mut sub_blocks_len = 0;
for i in 0..header.mip_count {
let offset = header.offset_to_surface[i as usize];
let next = if i < 12 {
header.offset_to_surface[(i + 1) as usize]
} else {
0
};
let mip_size = if next == 0 {
size as u32 - offset
} else {
next - offset
};
sub_blocks_len += Self::calculate_blocks_required(mip_size as usize);
}
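// sqpack header layout: SqPackFileInfoHeader + SqPackFileInfo, one LodBlock
// per mip, then a u16 compressed size per sub-block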
let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
+ std::mem::size_of::<SqPackFileInfo>()
+ std::mem::size_of::<LodBlock>() * header.mip_count as usize
+ std::mem::size_of::<u16>() * sub_blocks_len;
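// note: if header_size is already a multiple of ALIGN, this pads a full extra
// 128 bytes rather than zero (the model and standard paths below do the same)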
let header_align = ALIGN - (header_size % ALIGN);
// make room for the header
self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
let compressed_offset_base = self.writer.stream_position().map_err(Error::Io)?;
// write the raw texture file's header back
self.writer.write_all(&buf[..HEADER_SIZE]).map_err(Error::Io)?;
let initial_pos = self.writer.stream_position().map_err(Error::Io)?;
let mut lod_blocks = Vec::with_capacity(header.mip_count as usize);
let mut sub_blocks = Vec::with_capacity(header.mip_count as usize);
let mut total_blocks = 0;
for i in 0..header.mip_count {
let before_this_mip = self.writer.stream_position().map_err(Error::Io)?;
let compressed_offset = before_this_mip - compressed_offset_base;
let offset = header.offset_to_surface[i as usize];
let next = if i < 12 {
header.offset_to_surface[(i + 1) as usize]
} else {
0
};
let infos = if next == 0 {
// read to eof
self.write_blocks(&mut data, &mut hasher)?
} else {
let read = (&mut data).take((next - offset) as u64);
self.write_blocks(read, &mut hasher)?
};
let compressed_size = infos.iter()
.map(|info| info.compressed_size as u32)
.sum::<u32>()
+ (std::mem::size_of::<DatStdFileBlockInfos>() * infos.len()) as u32;
let decompressed_size = infos.iter()
.map(|info| info.uncompressed_size as u32)
.sum();
lod_blocks.push(LodBlock {
compressed_offset: compressed_offset as u32,
compressed_size,
decompressed_size,
block_offset: total_blocks,
block_count: infos.len() as u32,
});
total_blocks += infos.len() as u32;
sub_blocks.extend(infos);
}
assert_eq!(sub_blocks_len, sub_blocks.len());
let after_data = self.writer.stream_position().map_err(Error::Io)?;
let data_len = after_data - initial_pos;
// seek before all the data
self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;
let hash = hasher.finalize();
// check if we already have a file inserted with this hash
let contained = self.hashes.contains_key(&*hash);
self.hashes.entry(hash.to_vec())
.or_insert_with(|| {
let total_size = header_size + header_align + data_len as usize;
HashInfo::new(before_header as usize, total_size)
})
.files
.insert(file_info);
if contained {
return Ok(());
}
// write the headers
self.writer.write_le(&SqPackFileInfoHeader {
kind: FileKind::Texture,
size: (header_size + header_align) as u32,
raw_file_size: size as u32,
}).map_err(Error::BinRwWrite)?;
self.writer.write_le(&SqPackFileInfo {
_unk_0: [0, 0], // FIXME
number_of_blocks: lod_blocks.len() as u32,
}).map_err(Error::BinRwWrite)?;
// write the lod blocks out
for block in &lod_blocks {
self.writer.write_le(block).map_err(Error::BinRwWrite)?;
}
// write the sizes of each sub-block
for info in sub_blocks {
self.writer.write_le(&info.compressed_size).map_err(Error::BinRwWrite)?;
}
// seek past the data
self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;
Ok(())
}
fn alignment_necessary(size: usize) -> usize {
ALIGN - (size % ALIGN)
}
pub fn add_model_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read) -> Result<()> {
#[derive(binrw::BinRead)]
#[br(little)]
struct RawModelHeader {
version: u32,
stack_size: u32,
runtime_size: u32,
vertex_declaration_count: u16,
material_count: u16,
vertex_offset: [u32; MAX_MODEL_LODS],
index_offset: [u32; MAX_MODEL_LODS],
vertex_buffer_size: [u32; MAX_MODEL_LODS],
index_buffer_size: [u32; MAX_MODEL_LODS],
lod_count: u8,
enable_index_buffer_streaming: u8,
enable_edge_geometry: u8,
padding: u8,
}
let mut buf = [0; Self::BLOCK_SIZE];
let mut hasher = Sha3_384::default();
// read the model file's header
let header: RawModelHeader = read_struct(&mut data, &mut buf)?;
if header.enable_edge_geometry > 0 {
return Err(Error::EdgeGeometry);
}
// save the start of the mdl file for later (or maybe this just isn't present in the sqpack)
let mdl_header_bytes = buf[..std::mem::size_of::<RawModelHeader>()].to_vec();
hasher.update(&mdl_header_bytes);
let mut sqpack_header = ModelBlock {
version: header.version,
vertex_declaration_num: header.vertex_declaration_count,
material_num: header.material_count,
num_lods: header.lod_count,
index_buffer_streaming_enabled: header.enable_index_buffer_streaming,
edge_geometry_enabled: header.enable_edge_geometry,
padding: header.padding,
..Default::default()
};
let before_header = self.writer.stream_position().map_err(Error::Io)?;
// calculate size of header
let num_blocks: usize = Self::calculate_blocks_required(header.stack_size as usize)
+ Self::calculate_blocks_required(header.runtime_size as usize)
+ header.index_buffer_size
.iter()
.take(header.lod_count as usize)
.map(|&size| Self::calculate_blocks_required(size as usize))
.sum::<usize>()
+ header.vertex_buffer_size
.iter()
.take(header.lod_count as usize)
.map(|&size| Self::calculate_blocks_required(size as usize))
.sum::<usize>();
let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
+ std::mem::size_of::<ModelBlock>()
+ num_blocks * std::mem::size_of::<u16>();
let header_align = ALIGN - (header_size % ALIGN);
// make room for header
self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
let offset_base = self.writer.stream_position().map_err(Error::Io)?;
let mut block_index: u16 = 0;
let mut block_sizes = Vec::with_capacity(num_blocks);
let infos = self.write_blocks((&mut data).take(header.stack_size as u64), &mut hasher)?;
sqpack_header.block_num.stack = infos.len() as u16;
sqpack_header.block_index.stack = 0; // stack will always be block index 0
sqpack_header.offset.stack = 0; // stack is always offset 0
sqpack_header.size.stack = infos.iter()
.map(|info| info.uncompressed_size as u32)
.sum();
sqpack_header.size.stack += Self::alignment_necessary(sqpack_header.size.stack as usize) as u32;
sqpack_header.compressed_size.stack = infos.iter()
.map(|info| info.compressed_size as u32)
.sum::<u32>(); // + padding as u32;
block_index += infos.len() as u16;
block_sizes.extend(infos.iter().map(|x| x.compressed_size));
// set next section's block index and offset
sqpack_header.block_index.runtime = block_index;
sqpack_header.offset.runtime = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
let infos = self.write_blocks((&mut data).take(header.runtime_size as u64), &mut hasher)?;
sqpack_header.block_num.runtime = infos.len() as u16;
sqpack_header.size.runtime = infos.iter()
.map(|info| info.uncompressed_size as u32)
.sum();
sqpack_header.size.runtime += Self::alignment_necessary(sqpack_header.size.runtime as usize) as u32;
sqpack_header.compressed_size.runtime = infos.iter()
.map(|info| info.compressed_size as u32)
.sum::<u32>(); // + padding as u32;
block_index += infos.len() as u16;
block_sizes.extend(infos.iter().map(|x| x.compressed_size));
// ASSUMING MDL IS LAID OUT [(VERTEX, INDEX); 3]
for lod in 0..MAX_MODEL_LODS {
sqpack_header.offset.vertex_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
let infos = self.write_lod(lod, header.lod_count, &header.vertex_offset, &header.vertex_buffer_size, &mut data, &mut hasher)?;
sqpack_header.block_index.vertex_buffer[lod] = block_index;
sqpack_header.block_num.vertex_buffer[lod] = infos.len() as u16;
sqpack_header.size.vertex_buffer[lod] = infos.iter()
.map(|info| info.uncompressed_size as u32)
.sum();
sqpack_header.size.vertex_buffer[lod] += Self::alignment_necessary(sqpack_header.size.vertex_buffer[lod] as usize) as u32;
sqpack_header.compressed_size.vertex_buffer[lod] = infos.iter()
.map(|info| info.compressed_size as u32)
.sum::<u32>(); // + padding as u32;
block_index += infos.len() as u16;
block_sizes.extend(infos.iter().map(|x| x.compressed_size));
sqpack_header.offset.index_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
let infos = self.write_lod(lod, header.lod_count, &header.index_offset, &header.index_buffer_size, &mut data, &mut hasher)?;
sqpack_header.block_index.index_buffer[lod] = block_index;
sqpack_header.block_num.index_buffer[lod] = infos.len() as u16;
sqpack_header.size.index_buffer[lod] = infos.iter()
.map(|info| info.uncompressed_size as u32)
.sum();
sqpack_header.size.index_buffer[lod] += Self::alignment_necessary(sqpack_header.size.index_buffer[lod] as usize) as u32;
sqpack_header.compressed_size.index_buffer[lod] = infos.iter()
.map(|info| info.compressed_size as u32)
.sum::<u32>(); // + padding as u32;
block_index += infos.len() as u16;
block_sizes.extend(infos.iter().map(|x| x.compressed_size));
}
// ensure our math was correct
assert_eq!(num_blocks, block_sizes.len());
sqpack_header.number_of_blocks = block_index as u32;
sqpack_header.used_number_of_blocks = block_index as u32;
// store how long all this data we just wrote is
let after_data = self.writer.stream_position().map_err(Error::Io)?;
let data_len = after_data - offset_base;
// now seek back to before we wrote all this data
self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;
// check if we already have a file inserted with this hash
let hash = hasher.finalize();
let contained = self.hashes.contains_key(&*hash);
self.hashes.entry(hash.to_vec())
.or_insert_with(|| {
let total_size = header_size + header_align + data_len as usize;
HashInfo::new(before_header as usize, total_size)
})
.files
.insert(file_info);
if contained {
return Ok(());
}
// write the file header
let file_header = SqPackFileInfoHeader {
kind: FileKind::Model,
size: (header_size + header_align) as u32,
raw_file_size: size as u32,
};
self.writer.write_le(&file_header).map_err(Error::BinRwWrite)?;
// write the model header
self.writer.write_le(&sqpack_header).map_err(Error::BinRwWrite)?;
// write out all the block sizes, in order, as u16s
for size in block_sizes {
self.writer.write_le(&size).map_err(Error::BinRwWrite)?;
}
// now seek past the data
self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;
Ok(())
}
fn write_lod(&mut self, lod: usize, lod_count: u8, offsets: &[u32], sizes: &[u32], mut data: impl Read, hasher: &mut impl Digest) -> Result<Vec<DatStdFileBlockInfos>> {
// only write out the lods we have
if lod_count == 0 || lod > lod_count as usize - 1 {
return Ok(Default::default());
}
let _offset = offsets[lod];
let size = sizes[lod];
let read = (&mut data).take(size as u64);
let infos = self.write_blocks(read, hasher)?;
// let padding = self.align_to(ALIGN)?;
Ok(infos)
}
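// ceiling division: how many BLOCK_SIZE chunks are needed to hold `size`
// bytes, e.g. 40,000 bytes -> 3 blocks (16,000 + 16,000 + 8,000)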
fn calculate_blocks_required(size: usize) -> usize {
if size == 0 {
return 0;
}
if size <= Self::BLOCK_SIZE {
return 1;
}
let mut num_blocks = size / Self::BLOCK_SIZE;
if size > Self::BLOCK_SIZE * num_blocks {
num_blocks += 1;
}
num_blocks
}
pub fn add_standard_file(&mut self, file_info: FileInfo, size: usize, data: impl Read) -> Result<()> {
// store position before doing anything
let pos = self.writer.stream_position().map_err(Error::Io)?;
// calculate the number of blocks
let num_blocks = Self::calculate_blocks_required(size);
// calculate the header size, then write zeroes where it will be
let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
+ std::mem::size_of::<SqPackFileInfo>()
+ std::mem::size_of::<DatStdFileBlockInfos>() * num_blocks;
let header_align = ALIGN - (header_size % ALIGN);
self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
let after_header = self.writer.stream_position().map_err(Error::Io)?;
// write the data
let mut hasher = Sha3_384::default();
let infos = self.write_blocks(data, &mut hasher)?;
let hash = hasher.finalize();
// ensure we did our math correctly
assert_eq!(num_blocks, infos.len());
let after_blocks = self.writer.stream_position().map_err(Error::Io)?;
let blocks_size = after_blocks - after_header;
// check if we already have a file inserted with this hash
let contained = self.hashes.contains_key(&*hash);
self.hashes.entry(hash.to_vec())
.or_insert_with(|| HashInfo::new(pos as usize, header_size + header_align + blocks_size as usize))
.files
.insert(file_info);
// seek back to before chunks to add headers
self.writer.seek(SeekFrom::Start(pos)).map_err(Error::Io)?;
if contained {
return Ok(());
}
// add headers
self.writer.write_le(&SqPackFileInfoHeader {
kind: FileKind::Standard,
size: (header_size + header_align) as u32,
raw_file_size: size as u32,
}).map_err(Error::BinRwWrite)?;
self.writer.write_le(&SqPackFileInfo {
_unk_0: [0, 0], // FIXME: seen [0, 0], [2, 2], [3, 3], and [4, 4]
number_of_blocks: infos.len() as u32,
}).map_err(Error::BinRwWrite)?;
for info in &infos {
self.writer.write_le(info).map_err(Error::BinRwWrite)?;
}
// seek past data
self.writer.seek(SeekFrom::Start(after_blocks)).map_err(Error::Io)?;
Ok(())
}
fn align_to(&mut self, n: usize) -> Result<usize> {
let current_pos = self.writer.stream_position().map_err(Error::Io)? as usize;
// pad with zeroes to the next n-byte boundary; nothing to pad if already aligned
let bytes_to_pad = (n - (current_pos % n)) % n;
if bytes_to_pad > 0 {
// write padding bytes
self.writer.write_all(&vec![0; bytes_to_pad]).map_err(Error::Io)?;
}
Ok(bytes_to_pad)
}
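// each block on disk: a DatBlockHeader placeholder, the DEFLATE-compressed
// payload, then the real header written back in via a seek, padded out to the
// 128-byte boundary; returns one DatStdFileBlockInfos per block written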
fn write_blocks(&mut self, mut data: impl Read, hasher: &mut impl Digest) -> Result<Vec<DatStdFileBlockInfos>> {
let mut total_written = 0;
let mut infos = Vec::new();
// read 16kb chunks and compress them
let mut buf = [0; Self::BLOCK_SIZE];
let mut buf_idx: usize = 0;
'outer: loop {
// read up to 16kb from the data stream
loop {
let size = data.read(&mut buf[buf_idx..]).map_err(Error::Io)?;
if size == 0 {
// end of file
if buf_idx == 0 {
break 'outer;
}
break;
}
buf_idx += size;
}
// update hasher
hasher.update(&buf[..buf_idx]);
let offset = total_written;
// get position before chunk
let before_header = self.writer.stream_position().map_err(Error::Io)?;
// make space for chunk header
self.writer.write_all(&vec![0; std::mem::size_of::<DatBlockHeader>()]).map_err(Error::Io)?;
total_written += std::mem::size_of::<DatBlockHeader>() as u64;
// write compressed chunk to writer
let mut encoder = DeflateEncoder::new(&mut self.writer, Compression::best());
encoder.write_all(&buf[..buf_idx]).map_err(Error::Io)?;
encoder.finish().map_err(Error::Io)?;
// calculate the size of compressed data
let after_data = self.writer.stream_position().map_err(Error::Io)?;
let mut compressed_size = after_data - before_header;
total_written += compressed_size;
// seek back to before header
self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;
// write chunk header
let header = DatBlockHeader {
size: std::mem::size_of::<DatBlockHeader>() as u32,
uncompressed_size: buf_idx as u32,
compressed_size: (compressed_size - std::mem::size_of::<DatBlockHeader>() as u64) as u32,
_unk_0: 0,
};
self.writer.write_le(&header).map_err(Error::BinRwWrite)?;
// seek past chunk
self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;
// pad to 128 bytes
let padded = self.align_to(ALIGN)?;
// add padding bytes to the compressed size because that's just
// how sqpack do
compressed_size += padded as u64;
// total_written += padded as u64;
infos.push(DatStdFileBlockInfos {
offset: offset as u32,
uncompressed_size: buf_idx as u16,
compressed_size: compressed_size as u16,
});
// end of file was reached
if buf_idx < Self::BLOCK_SIZE {
break;
}
buf_idx = 0;
}
Ok(infos)
}
}


@@ -5,7 +5,6 @@ use std::io::{Cursor, Read, Seek, SeekFrom, Write};
use flate2::read::DeflateDecoder;
use sqpack::{DatBlockHeader, DatStdFileBlockInfos, FileKind, LodBlock, ModelBlock, SqPackFileInfo, SqPackFileInfoHeader};
use sqpack::binrw::{BinRead, BinWriterExt, VecArgs};
-use sqpack::binrw::meta::ReadEndian;
use zip::ZipArchive;
use crate::Error;
@@ -13,6 +12,7 @@ use crate::error::Result;
use crate::model::manifest_kind::ManifestKind;
use crate::model::SimpleMod;
use crate::tracking_reader::TrackingReader;
+use crate::util::{MAX_MODEL_LODS, read_struct};
#[doc(hidden)]
pub trait WriteSeek: Write + Seek {}
@@ -54,7 +54,7 @@ impl<R: Read + Seek> TtmpExtractor<R> {
let mut buf = [0; 4096];
for mod_file in all_files {
-let file = &*mod_file.file;
+let file = mod_file.file;
data_file.read = 0;
// get the writer to write this file into
@@ -62,7 +62,7 @@ impl<R: Read + Seek> TtmpExtractor<R> {
.map_err(Error::Io)?;
let expected = file.mod_size;
-let info: SqPackFileInfoHeader = Self::read_struct(&mut data_file, &mut buf)?;
+let info: SqPackFileInfoHeader = read_struct(&mut data_file, &mut buf)?;
match info.kind {
FileKind::Empty => todo!(),
FileKind::Standard => {
@@ -131,10 +131,10 @@ impl<R: Read> TtmpExtractor<R> {
pub fn extract_one_into<W: Write + Seek>(mod_file: &ModFile, mut reader: R, mut writer: W) -> Result<()> {
let mut reader = TrackingReader::new(&mut reader);
let mut buf = [0; 4096];
-let file = &*mod_file.file;
+let file = mod_file.file;
let expected = file.mod_size;
-let info: SqPackFileInfoHeader = Self::read_struct(&mut reader, &mut buf)?;
+let info: SqPackFileInfoHeader = read_struct(&mut reader, &mut buf)?;
match info.kind {
FileKind::Empty => todo!(),
FileKind::Standard => {
@@ -157,9 +157,9 @@
}
fn extract_standard_file<T: Read, W: Write>(info: &SqPackFileInfoHeader, mut data_file: T, mut writer: W, buf: &mut [u8]) -> Result<()> {
-let std_info: SqPackFileInfo = Self::read_struct(&mut data_file, buf)?;
+let std_info: SqPackFileInfo = read_struct(&mut data_file, buf)?;
let blocks: Vec<DatStdFileBlockInfos> = (0..std_info.number_of_blocks)
-.map(|_| Self::read_struct(&mut data_file, buf))
+.map(|_| read_struct(&mut data_file, buf))
.collect::<Result<_>>()?;
let skip_amt = info.size as usize
@@ -176,7 +176,7 @@
}
fn extract_model_file<T: Read, W: Write + Seek>(info: &SqPackFileInfoHeader, mut reader: T, mut writer: W, buf: &mut [u8]) -> Result<()> {
-let model_info: ModelBlock = Self::read_struct(&mut reader, buf)?;
+let model_info: ModelBlock = read_struct(&mut reader, buf)?;
let block_counts = &model_info.block_num;
let total_blocks = block_counts.stack
@@ -214,14 +214,13 @@
buf,
)?;
-const MAX_LODS: usize = 3;
-let mut vertex_data_offsets = [0u32; MAX_LODS];
-let mut vertex_buffer_sizes = [0u32; MAX_LODS];
+let mut vertex_data_offsets = [0u32; MAX_MODEL_LODS];
+let mut vertex_buffer_sizes = [0u32; MAX_MODEL_LODS];
-let mut index_data_offsets = [0u32; MAX_LODS];
-let mut index_buffer_sizes = [0u32; MAX_LODS];
+let mut index_data_offsets = [0u32; MAX_MODEL_LODS];
+let mut index_buffer_sizes = [0u32; MAX_MODEL_LODS];
-for lod_index in 0..MAX_LODS {
+for lod_index in 0..MAX_MODEL_LODS {
// Vertex buffer
let block_count = model_info.block_num.vertex_buffer[lod_index];
if block_count != 0 {
@@ -293,9 +292,9 @@
}
fn extract_texture_file<T: Read, W: Write>(info: &SqPackFileInfoHeader, mut reader: T, mut writer: W, buf: &mut [u8]) -> Result<()> {
-let std_info: SqPackFileInfo = Self::read_struct(&mut reader, buf)?;
+let std_info: SqPackFileInfo = read_struct(&mut reader, buf)?;
let blocks: Vec<LodBlock> = (0..std_info.number_of_blocks)
-.map(|_| Self::read_struct(&mut reader, buf))
+.map(|_| read_struct(&mut reader, buf))
.collect::<Result<_>>()?;
let sub_block_count = blocks
@@ -327,21 +326,8 @@
Ok(())
}
-fn read_struct<S: BinRead + ReadEndian, T: Read>(mut reader: T, buf: &mut [u8]) -> Result<S>
-where S::Args: Default,
-{
-let size = std::mem::size_of::<S>();
-if size > buf.len() {
-panic!();
-}
-reader.read_exact(&mut buf[..size]).map_err(Error::Io)?;
-S::read(&mut Cursor::new(&buf[..size]))
-.map_err(Error::SqPackError)
-}
fn read_block_into<T: Read, W: Write>(mut reader: T, mut writer: W, buf: &mut [u8], size: usize) -> Result<u64> {
-let header: DatBlockHeader = Self::read_struct(&mut reader, buf)?;
+let header: DatBlockHeader = read_struct(&mut reader, buf)?;
let (read, actual) = if header.compressed_size == 32_000 {
// uncompressed

src/util.rs (new file, 22 lines)

@@ -0,0 +1,22 @@
use std::io::{Cursor, Read};
use sqpack::binrw::BinRead;
use sqpack::binrw::meta::ReadEndian;
use crate::error::{Error, Result};
pub const MAX_MODEL_LODS: usize = 3;
pub const MAX_TEXTURE_LODS: usize = 3;
pub fn read_struct<S: BinRead + ReadEndian, T: Read>(mut reader: T, buf: &mut [u8]) -> Result<S>
where S::Args: Default,
{
let size = std::mem::size_of::<S>();
if size > buf.len() {
panic!("struct of {size} bytes does not fit in {}-byte scratch buffer", buf.len());
}
reader.read_exact(&mut buf[..size]).map_err(Error::Io)?;
S::read(&mut Cursor::new(&buf[..size]))
.map_err(Error::SqPackError)
}