// ttmp-rs/src/mpd_encoder.rs

use std::collections::{HashMap, HashSet};
use std::fs::File;
use std::io::{BufWriter, Cursor, Read, Seek, SeekFrom, Write};
use std::sync::{Arc, Condvar, Mutex};
use blake3::Hasher as Blake3;
use blake3::traits::digest::Digest;
use crossbeam_channel::{Receiver, Sender};
use flate2::Compression;
use flate2::write::DeflateEncoder;
use sqpack::{DatBlockHeader, DatStdFileBlockInfos, FileKind, LodBlock, ModelBlock, SqPackFileInfo, SqPackFileInfoHeader};
use sqpack::binrw::{self, BinWriterExt};
use crate::error::{Error, Result};
use crate::model::{ManifestKind, SimpleMod};
use crate::util::{MAX_MODEL_LODS, MAX_TEXTURE_LODS, read_struct};
const ALIGN: usize = 128;
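
/// Streaming encoder for `.mpd` mod pack data files.
///
/// Files are appended one at a time via [`MpdEncoder::add_standard_file`],
/// [`MpdEncoder::add_texture_file`], and [`MpdEncoder::add_model_file`]; each
/// returns the blake3 hash of the raw input, and identical inputs are only
/// stored once. Compression is fanned out to a pool of worker threads and the
/// results are written back in input order.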
pub struct MpdEncoder {
    pub manifest: ManifestKind,
    pub writer: BufWriter<File>,
    pub compression_level: u32,
    hashes: HashMap<Vec<u8>, HashInfo>,
    pub to_pool: Sender<(usize, Vec<u8>)>,
    pub from_pool: Receiver<Result<(Vec<u8>, usize), std::io::Error>>,
    pub current_chunk: Arc<(Condvar, Mutex<usize>)>,
}
#[derive(Hash, Eq, PartialEq)]
pub struct FileInfo {
    pub game_path: String,
    pub group: Option<String>,
    pub option: Option<String>,
}

struct HashInfo {
    pub offset: usize,
    pub size: usize,
    pub files: HashSet<FileInfo>,
}

impl HashInfo {
    pub fn new(offset: usize, size: usize) -> Self {
        Self {
            offset,
            size,
            files: Default::default(),
        }
    }
}
impl MpdEncoder {
    const BLOCK_SIZE: usize = 16_000;
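
    /// Creates an encoder writing to `writer`, with one compression worker per
    /// thread (defaulting to the number of logical CPUs) at the maximum
    /// deflate level.
    ///
    /// A minimal usage sketch; the path and the `manifest` and `file_info`
    /// values are hypothetical, shown only for illustration:
    ///
    /// ```ignore
    /// let file = std::fs::File::create("pack.mpd")?; // hypothetical path
    /// let mut encoder = MpdEncoder::new(file, manifest, None);
    /// let hash = encoder.add_standard_file(file_info, size, reader)?;
    /// let (manifest, file) = encoder.finalize()?;
    /// ```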
    pub fn new(writer: File, manifest: ManifestKind, num_threads: impl Into<Option<usize>>) -> Self {
        Self::with_compression_level(writer, manifest, num_threads, 9)
    }

    pub fn with_compression_level(writer: File, manifest: ManifestKind, num_threads: impl Into<Option<usize>>, compression_level: u32) -> Self {
        let num_threads = num_threads.into().unwrap_or_else(num_cpus::get);
        let (to_pool_tx, to_pool_rx) = crossbeam_channel::bounded(0);
        let (from_pool_tx, from_pool_rx) = crossbeam_channel::bounded(0);
        let current_chunk = Arc::new((Condvar::new(), Mutex::new(0)));

        for _ in 0..num_threads {
            let to_pool_rx = to_pool_rx.clone();
            let from_pool_tx = from_pool_tx.clone();
            let current_chunk = Arc::clone(&current_chunk);

            std::thread::spawn(move || {
                loop {
                    // receive a chunk of data
                    let (idx, data): (usize, Vec<u8>) = match to_pool_rx.recv() {
                        Ok(data) => data,
                        Err(_) => break,
                    };

                    // compress it in memory
                    let cursor = Cursor::new(Vec::with_capacity(data.len()));
                    let mut encoder = DeflateEncoder::new(cursor, Compression::new(compression_level));
                    let res = encoder.write_all(&data)
                        .and_then(|_| encoder.finish())
                        .map(|cursor| (cursor.into_inner(), data.len()));

                    // wait until it's this chunk's turn, so results are sent
                    // back in input order
                    let (cvar, lock) = &*current_chunk;
                    let mut current = lock.lock().unwrap();
                    while *current != idx {
                        current = cvar.wait(current).unwrap();
                    }
                    // drop the lock, lest we deadlock while blocking on send
                    drop(current);

                    // send back the result containing compressed data
                    from_pool_tx.send(res).ok();
                }
            });
        }

        Self {
            manifest,
            writer: BufWriter::new(writer),
            compression_level,
            hashes: Default::default(),
            to_pool: to_pool_tx,
            from_pool: from_pool_rx,
            current_chunk,
        }
    }
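
    /// Flushes and truncates the output, then patches every manifest entry's
    /// `mod_offset` and `mod_size` from the recorded hash table.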
    pub fn finalize(mut self) -> Result<(ManifestKind, File)> {
        let pos = self.writer.stream_position().map_err(Error::Io)?;

        // truncate anything left past the final write position (duplicate
        // entries can leave abandoned bytes at the end of the file)
        let file = self.writer.into_inner().map_err(Error::BufWriterIntoInner)?;
        file.set_len(pos + 1).map_err(Error::Io)?;

        // update the manifest with the final offsets and sizes
        match &mut self.manifest {
            ManifestKind::V1(mods) => {
                Self::finalize_v1(&self.hashes, mods, None, None)?;
            }
            ManifestKind::V2(pack) => {
                if let Some(mods) = &mut pack.simple_mods_list {
                    Self::finalize_v1(&self.hashes, mods, None, None)?;
                }

                if let Some(pages) = &mut pack.mod_pack_pages {
                    for page in pages {
                        for group in &mut page.mod_groups {
                            for option in &mut group.option_list {
                                Self::finalize_v1(
                                    &self.hashes,
                                    &mut option.mods_jsons,
                                    Some(group.group_name.clone()),
                                    Some(option.name.clone()),
                                )?;
                            }
                        }
                    }
                }
            }
        }

        Ok((self.manifest, file))
    }
    fn finalize_v1(hashes: &HashMap<Vec<u8>, HashInfo>, mods: &mut [SimpleMod], group: Option<String>, option: Option<String>) -> Result<()> {
        for simple in mods {
            let file_info = FileInfo {
                game_path: simple.full_path.clone(),
                group: group.clone(),
                option: option.clone(),
            };

            // find the hash entry that recorded this file
            let info = hashes.values()
                .find(|info| info.files.contains(&file_info))
                .ok_or(Error::MissingHash)?;

            simple.mod_size = info.size;
            simple.mod_offset = info.offset;
        }

        Ok(())
    }
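
    /// Records an additional game path against a hash that has already been
    /// written; hashes that have not been seen yet are silently ignored
    /// (`and_modify` with no `or_insert`).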
    pub fn add_file_info(&mut self, hash: &[u8], file_info: FileInfo) {
        self.hashes.entry(hash.to_vec())
            .and_modify(|info| {
                info.files.insert(file_info);
            });
    }
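
    /// Writes a `.tex` file as a SqPack texture entry: a placeholder header is
    /// reserved first, each mip level is compressed into `BLOCK_SIZE`
    /// (16,000-byte) sub-blocks, and then the real header (file info, one
    /// `LodBlock` per mip, and the sub-block sizes) is written back over the
    /// placeholder.
    ///
    /// Returns the blake3 hash of the raw input; duplicate inputs are not
    /// stored twice.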
    pub fn add_texture_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read + Send) -> Result<Vec<u8>> {
        #[derive(binrw::BinRead)]
        #[br(little)]
        struct RawTextureHeader {
            _attributes: u32,
            _format: u32,
            _width: u16,
            _height: u16,
            _depth: u16,
            mip_count: u16,
            _lod_offset: [u32; MAX_TEXTURE_LODS],
            offset_to_surface: [u32; 13],
        }

        const HEADER_SIZE: usize = std::mem::size_of::<RawTextureHeader>();

        let mut buf = [0; Self::BLOCK_SIZE];
        let mut hasher = Blake3::default();

        // read the texture file's header
        let header: RawTextureHeader = read_struct(&mut data, &mut buf)?;
        hasher.update(&buf[..HEADER_SIZE]);

        let before_header = self.writer.stream_position().map_err(Error::Io)?;

        // count the sub-blocks each mip level will need, so we can size the header
        let mut sub_blocks_len = 0;
        for i in 0..header.mip_count {
            let offset = header.offset_to_surface[i as usize];
            let next = if i < 12 {
                header.offset_to_surface[(i + 1) as usize]
            } else {
                0
            };
            let mip_size = if next == 0 {
                size as u32 - offset.min(size as u32)
            } else {
                next - offset
            };
            sub_blocks_len += Self::calculate_blocks_required(mip_size as usize);
        }

        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<SqPackFileInfo>()
            + std::mem::size_of::<LodBlock>() * header.mip_count as usize
            + std::mem::size_of::<u16>() * sub_blocks_len;
        let header_align = ALIGN - (header_size % ALIGN);

        // make room for the header
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
        let compressed_offset_base = self.writer.stream_position().map_err(Error::Io)?;

        // write the raw texture file's header back out
        self.writer.write_all(&buf[..HEADER_SIZE]).map_err(Error::Io)?;
        let initial_pos = self.writer.stream_position().map_err(Error::Io)?;

        let mut lod_blocks = Vec::with_capacity(header.mip_count as usize);
        let mut sub_blocks = Vec::with_capacity(header.mip_count as usize);
        let mut total_blocks = 0;

        for i in 0..header.mip_count {
            let before_this_mip = self.writer.stream_position().map_err(Error::Io)?;
            let compressed_offset = before_this_mip - compressed_offset_base;

            let offset = header.offset_to_surface[i as usize];
            let next = if i < 12 {
                header.offset_to_surface[(i + 1) as usize]
            } else {
                0
            };

            let infos = if next == 0 {
                // last mip: read to eof
                self.write_blocks(&mut data, &mut hasher)?
            } else {
                let read = (&mut data).take((next - offset) as u64);
                self.write_blocks(read, &mut hasher)?
            };

            let compressed_size = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>()
                + (std::mem::size_of::<DatStdFileBlockInfos>() * infos.len()) as u32;
            let decompressed_size = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();

            // skip empty mip levels entirely
            if compressed_size == 0 || decompressed_size == 0 {
                continue;
            }

            lod_blocks.push(LodBlock {
                compressed_offset: compressed_offset as u32,
                compressed_size,
                decompressed_size,
                block_offset: total_blocks,
                block_count: infos.len() as u32,
            });

            total_blocks += infos.len() as u32;
            sub_blocks.extend(infos);
        }

        // FIXME: fails on minions defined
        assert_eq!(sub_blocks_len, sub_blocks.len());

        let after_data = self.writer.stream_position().map_err(Error::Io)?;
        let data_len = after_data - initial_pos;

        // seek back to before all the data
        self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;

        let hash = hasher.finalize();

        // check if we already have a file inserted with this hash
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| {
                let total_size = header_size + header_align + data_len as usize;
                HashInfo::new(before_header as usize, total_size)
            })
            .files
            .insert(file_info);
        if contained {
            // duplicate: leave the cursor at before_header so the next file
            // overwrites the redundant copy we just wrote
            return Ok(hash.to_vec());
        }

        // write the headers
        self.writer.write_le(&SqPackFileInfoHeader {
            kind: FileKind::Texture,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        }).map_err(Error::BinRwWrite)?;
        self.writer.write_le(&SqPackFileInfo {
            _unk_0: [0, 0], // FIXME
            number_of_blocks: lod_blocks.len() as u32,
        }).map_err(Error::BinRwWrite)?;

        // write the lod blocks out
        for block in &lod_blocks {
            self.writer.write_le(block).map_err(Error::BinRwWrite)?;
        }

        // write the sizes of each sub-block
        for info in sub_blocks {
            self.writer.write_le(&info.compressed_size).map_err(Error::BinRwWrite)?;
        }

        // seek past the data
        self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

        Ok(hash.to_vec())
    }
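
    /// Padding needed to bring `size` up to the next multiple of [`ALIGN`].
    /// Note that an already-aligned size gets a full extra block, which matches
    /// the padding that `write_blocks` actually emits:
    ///
    /// ```ignore
    /// assert_eq!(MpdEncoder::alignment_necessary(100), 28);
    /// assert_eq!(MpdEncoder::alignment_necessary(128), 128); // not 0
    /// ```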
    fn alignment_necessary(size: usize) -> usize {
        ALIGN - (size % ALIGN)
    }
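
    /// Writes a `.mdl` file as a SqPack model entry. The raw model header is
    /// parsed and re-expressed as a `ModelBlock`, then the stack, runtime, and
    /// per-LoD vertex/index sections are compressed in the order they appear in
    /// the file. Models with edge geometry enabled are rejected.
    ///
    /// Returns the blake3 hash of the raw input.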
    pub fn add_model_file(&mut self, file_info: FileInfo, size: usize, mut data: impl Read + Send) -> Result<Vec<u8>> {
        #[derive(binrw::BinRead)]
        #[br(little)]
        struct RawModelHeader {
            version: u32,
            stack_size: u32,
            runtime_size: u32,
            vertex_declaration_count: u16,
            material_count: u16,
            vertex_offset: [u32; MAX_MODEL_LODS],
            index_offset: [u32; MAX_MODEL_LODS],
            vertex_buffer_size: [u32; MAX_MODEL_LODS],
            index_buffer_size: [u32; MAX_MODEL_LODS],
            lod_count: u8,
            enable_index_buffer_streaming: u8,
            enable_edge_geometry: u8,
            padding: u8,
        }

        let mut buf = [0; Self::BLOCK_SIZE];
        let mut hasher = Blake3::default();

        // read the model file's header
        let header: RawModelHeader = read_struct(&mut data, &mut buf)?;
        if header.enable_edge_geometry > 0 {
            return Err(Error::EdgeGeometry);
        }

        // save the start of the mdl file for hashing (it may just not be
        // present in the sqpack)
        let mdl_header_bytes = buf[..std::mem::size_of::<RawModelHeader>()].to_vec();
        hasher.update(&mdl_header_bytes);

        let mut sqpack_header = ModelBlock {
            version: header.version,
            vertex_declaration_num: header.vertex_declaration_count,
            material_num: header.material_count,
            num_lods: header.lod_count,
            index_buffer_streaming_enabled: header.enable_index_buffer_streaming,
            edge_geometry_enabled: header.enable_edge_geometry,
            padding: header.padding,
            ..Default::default()
        };

        let before_header = self.writer.stream_position().map_err(Error::Io)?;

        // calculate the size of the header
        let num_blocks: usize = Self::calculate_blocks_required(header.stack_size as usize)
            + Self::calculate_blocks_required(header.runtime_size as usize)
            + header.index_buffer_size
                .iter()
                .take(header.lod_count as usize)
                .map(|&size| Self::calculate_blocks_required(size as usize))
                .sum::<usize>()
            + header.vertex_buffer_size
                .iter()
                .take(header.lod_count as usize)
                .map(|&size| Self::calculate_blocks_required(size as usize))
                .sum::<usize>();
        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<ModelBlock>()
            + num_blocks * std::mem::size_of::<u16>();
        let header_align = ALIGN - (header_size % ALIGN);

        // make room for the header
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
        let offset_base = self.writer.stream_position().map_err(Error::Io)?;

        let mut block_index: u16 = 0;
        let mut block_sizes = Vec::with_capacity(num_blocks);

        // stack section
        let infos = self.write_blocks((&mut data).take(header.stack_size as u64), &mut hasher)?;
        sqpack_header.block_num.stack = infos.len() as u16;
        sqpack_header.block_index.stack = 0; // stack will always be block index 0
        sqpack_header.offset.stack = 0; // stack is always offset 0
        sqpack_header.size.stack = infos.iter()
            .map(|info| info.uncompressed_size as u32)
            .sum();
        sqpack_header.size.stack += Self::alignment_necessary(sqpack_header.size.stack as usize) as u32;
        sqpack_header.compressed_size.stack = infos.iter()
            .map(|info| info.compressed_size as u32)
            .sum::<u32>(); // + padding as u32;
        block_index += infos.len() as u16;
        block_sizes.extend(infos.iter().map(|x| x.compressed_size));

        // runtime section: set its block index and offset, then write it
        sqpack_header.block_index.runtime = block_index;
        sqpack_header.offset.runtime = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
        let infos = self.write_blocks((&mut data).take(header.runtime_size as u64), &mut hasher)?;
        sqpack_header.block_num.runtime = infos.len() as u16;
        sqpack_header.size.runtime = infos.iter()
            .map(|info| info.uncompressed_size as u32)
            .sum();
        sqpack_header.size.runtime += Self::alignment_necessary(sqpack_header.size.runtime as usize) as u32;
        sqpack_header.compressed_size.runtime = infos.iter()
            .map(|info| info.compressed_size as u32)
            .sum::<u32>(); // + padding as u32;
        block_index += infos.len() as u16;
        block_sizes.extend(infos.iter().map(|x| x.compressed_size));

        // ASSUMING MDL IS LAID OUT [(VERTEX, INDEX); 3]
        for lod in 0..MAX_MODEL_LODS {
            sqpack_header.offset.vertex_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
            let infos = self.write_lod(lod, header.lod_count, &header.vertex_offset, &header.vertex_buffer_size, &mut data, &mut hasher)?;
            sqpack_header.block_index.vertex_buffer[lod] = block_index;
            sqpack_header.block_num.vertex_buffer[lod] = infos.len() as u16;
            sqpack_header.size.vertex_buffer[lod] = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();
            sqpack_header.size.vertex_buffer[lod] += Self::alignment_necessary(sqpack_header.size.vertex_buffer[lod] as usize) as u32;
            sqpack_header.compressed_size.vertex_buffer[lod] = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>(); // + padding as u32;
            block_index += infos.len() as u16;
            block_sizes.extend(infos.iter().map(|x| x.compressed_size));

            sqpack_header.offset.index_buffer[lod] = (self.writer.stream_position().map_err(Error::Io)? - offset_base) as u32;
            let infos = self.write_lod(lod, header.lod_count, &header.index_offset, &header.index_buffer_size, &mut data, &mut hasher)?;
            sqpack_header.block_index.index_buffer[lod] = block_index;
            sqpack_header.block_num.index_buffer[lod] = infos.len() as u16;
            sqpack_header.size.index_buffer[lod] = infos.iter()
                .map(|info| info.uncompressed_size as u32)
                .sum();
            sqpack_header.size.index_buffer[lod] += Self::alignment_necessary(sqpack_header.size.index_buffer[lod] as usize) as u32;
            sqpack_header.compressed_size.index_buffer[lod] = infos.iter()
                .map(|info| info.compressed_size as u32)
                .sum::<u32>(); // + padding as u32;
            block_index += infos.len() as u16;
            block_sizes.extend(infos.iter().map(|x| x.compressed_size));
        }

        // ensure our math was correct
        assert_eq!(num_blocks, block_sizes.len());

        sqpack_header.number_of_blocks = block_index as u32;
        sqpack_header.used_number_of_blocks = block_index as u32;

        // store how long all the data we just wrote is
        let after_data = self.writer.stream_position().map_err(Error::Io)?;
        let data_len = after_data - offset_base;

        // now seek back to before we wrote all of this data
        self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;

        // check if we already have a file inserted with this hash
        let hash = hasher.finalize();
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| {
                let total_size = header_size + header_align + data_len as usize;
                HashInfo::new(before_header as usize, total_size)
            })
            .files
            .insert(file_info);
        if contained {
            // duplicate: leave the cursor at before_header so the next file
            // overwrites the redundant copy we just wrote
            return Ok(hash.to_vec());
        }

        // write the file header
        let file_header = SqPackFileInfoHeader {
            kind: FileKind::Model,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        };
        self.writer.write_le(&file_header).map_err(Error::BinRwWrite)?;

        // write the model header
        self.writer.write_le(&sqpack_header).map_err(Error::BinRwWrite)?;

        // write out all the block sizes, in order, as u16s
        for size in block_sizes {
            self.writer.write_le(&size).map_err(Error::BinRwWrite)?;
        }

        // now seek past the data
        self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

        Ok(hash.to_vec())
    }
    fn write_lod(&mut self, lod: usize, lod_count: u8, offsets: &[u32], sizes: &[u32], mut data: impl Read + Send, hasher: &mut (impl Digest + Send)) -> Result<Vec<DatStdFileBlockInfos>> {
        // only write out the lods we actually have
        if lod >= lod_count as usize {
            return Ok(Default::default());
        }

        let _offset = offsets[lod];
        let size = sizes[lod];

        let read = (&mut data).take(size as u64);
        let infos = self.write_blocks(read, hasher)?;
        // let padding = self.align_to(ALIGN)?;

        Ok(infos)
    }
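
    /// Number of blocks needed to hold `size` bytes — a ceiling division by
    /// [`Self::BLOCK_SIZE`] (16,000 bytes):
    ///
    /// ```ignore
    /// assert_eq!(MpdEncoder::calculate_blocks_required(0), 0);
    /// assert_eq!(MpdEncoder::calculate_blocks_required(16_000), 1);
    /// assert_eq!(MpdEncoder::calculate_blocks_required(16_001), 2);
    /// ```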
    fn calculate_blocks_required(size: usize) -> usize {
        if size == 0 {
            return 0;
        }

        if size <= Self::BLOCK_SIZE {
            return 1;
        }

        // ceiling division by BLOCK_SIZE
        let mut num_blocks = size / Self::BLOCK_SIZE;
        if size > Self::BLOCK_SIZE * num_blocks {
            num_blocks += 1;
        }

        num_blocks
    }
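
    /// Writes an arbitrary file as a SqPack standard entry: placeholder header
    /// first, then the compressed blocks, then the real header
    /// (`SqPackFileInfoHeader`, `SqPackFileInfo`, and one
    /// `DatStdFileBlockInfos` per block) written back over the placeholder.
    ///
    /// Returns the blake3 hash of the raw input.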
    pub fn add_standard_file(&mut self, file_info: FileInfo, size: usize, data: impl Read + Send) -> Result<Vec<u8>> {
        // store the position before doing anything
        let pos = self.writer.stream_position().map_err(Error::Io)?;

        // calculate the number of blocks
        let num_blocks = Self::calculate_blocks_required(size);

        // calculate the header size, then write zeroes where the header will be
        let header_size = std::mem::size_of::<SqPackFileInfoHeader>()
            + std::mem::size_of::<SqPackFileInfo>()
            + std::mem::size_of::<DatStdFileBlockInfos>() * num_blocks;
        let header_align = ALIGN - (header_size % ALIGN);
        self.writer.write_all(&vec![0; header_size + header_align]).map_err(Error::Io)?;
        let after_header = self.writer.stream_position().map_err(Error::Io)?;

        // write the data
        let mut hasher = Blake3::default();
        let infos = self.write_blocks(data, &mut hasher)?;
        let hash = hasher.finalize();

        // ensure we did our math correctly
        assert_eq!(num_blocks, infos.len());

        let after_blocks = self.writer.stream_position().map_err(Error::Io)?;
        let blocks_size = after_blocks - after_header;

        // check if we already have a file inserted with this hash
        let contained = self.hashes.contains_key(&*hash);
        self.hashes.entry(hash.to_vec())
            .or_insert_with(|| HashInfo::new(pos as usize, header_size + header_align + blocks_size as usize))
            .files
            .insert(file_info);

        // seek back to before the blocks to add the headers
        self.writer.seek(SeekFrom::Start(pos)).map_err(Error::Io)?;
        if contained {
            // duplicate: leave the cursor at pos so the next file overwrites
            // the redundant copy we just wrote
            return Ok(hash.to_vec());
        }

        // add the headers
        self.writer.write_le(&SqPackFileInfoHeader {
            kind: FileKind::Standard,
            size: (header_size + header_align) as u32,
            raw_file_size: size as u32,
        }).map_err(Error::BinRwWrite)?;
        self.writer.write_le(&SqPackFileInfo {
            _unk_0: [0, 0], // FIXME: seen [0, 0], [2, 2], [3, 3], and [4, 4]
            number_of_blocks: infos.len() as u32,
        }).map_err(Error::BinRwWrite)?;

        for info in &infos {
            self.writer.write_le(info).map_err(Error::BinRwWrite)?;
        }

        // seek past the data
        self.writer.seek(SeekFrom::Start(after_blocks)).map_err(Error::Io)?;

        Ok(hash.to_vec())
    }
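
    /// Pads the writer with zeroes to the next multiple of `n` and returns how
    /// many bytes were written (always in `1..=n`; see `alignment_necessary`).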
    fn align_to(&mut self, n: usize) -> Result<usize> {
        let current_pos = self.writer.stream_position().map_err(Error::Io)? as usize;
        // note: this is always in 1..=n, so a full block of padding is written
        // even when the position is already aligned
        let bytes_to_pad = n - (current_pos % n);
        if bytes_to_pad > 0 {
            // write the padding bytes
            self.writer.write_all(&vec![0; bytes_to_pad]).map_err(Error::Io)?;
        }

        Ok(bytes_to_pad)
    }
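
    /// Compresses `data` in `BLOCK_SIZE` chunks using the worker pool and
    /// writes the blocks out in order, returning one `DatStdFileBlockInfos`
    /// per block.
    ///
    /// Ordering protocol: a scoped reader thread tags each chunk with its
    /// index and sends it to the pool; each worker compresses its chunk, then
    /// sleeps on `current_chunk`'s condvar until the counter equals its index
    /// before sending the result back. This thread writes a block, bumps the
    /// counter, and notifies the workers, so blocks land in the file in input
    /// order even though compression finishes out of order.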
    fn write_blocks(&mut self, mut data: impl Read + Send, hasher: &mut (impl Digest + Send)) -> Result<Vec<DatStdFileBlockInfos>> {
        let mut total_written = 0;
        let mut infos = Vec::new();

        // to make encoding faster, a threadpool is waiting to do compression
        // jobs: we read BLOCK_SIZE chunks here and send them to the pool to be
        // compressed, and the pool sends them back for us to write out, in
        // order

        // set the current chunk back to 0
        {
            *self.current_chunk.1.lock().unwrap() = 0;
        }

        let (finished_tx, finished_rx) = crossbeam_channel::bounded(0);

        // spawn a thread to read the data and send it to the pool
        let infos = crossbeam_utils::thread::scope(|s| {
            let handle = s.spawn(|_| {
                // read BLOCK_SIZE chunks and send them off for compression
                let mut chunk_idx = 0;
                let mut buf = [0; Self::BLOCK_SIZE];
                let mut buf_idx: usize = 0;

                'outer: loop {
                    // read up to BLOCK_SIZE bytes from the data stream
                    loop {
                        let size = data.read(&mut buf[buf_idx..]).map_err(Error::Io).unwrap();
                        if size == 0 {
                            // end of file
                            if buf_idx == 0 {
                                break 'outer;
                            }

                            break;
                        }

                        buf_idx += size;
                    }

                    // update the hasher
                    hasher.update(&buf[..buf_idx]);

                    // send the data to be compressed
                    self.to_pool.send((chunk_idx, buf[..buf_idx].to_vec())).ok();

                    chunk_idx += 1;
                    buf_idx = 0;
                }

                // tell the main thread how many chunks to expect
                finished_tx.send(chunk_idx).ok();
            });

            let mut num_chunks = None;

            // receive the compressed chunks
            loop {
                if let Some(chunks) = num_chunks {
                    if *self.current_chunk.1.lock().unwrap() >= chunks {
                        break;
                    }
                }

                // wake any workers waiting for their turn to submit
                let (cvar, _) = &*self.current_chunk;
                cvar.notify_all();

                let (data, uncompressed_size) = crossbeam_channel::select! {
                    recv(self.from_pool) -> x => x,
                    recv(finished_rx) -> chunks => {
                        let chunks = chunks.unwrap();
                        num_chunks = Some(chunks);
                        continue;
                    }
                }.unwrap().map_err(Error::Io)?;

                let offset = total_written;

                // get the position before the chunk
                let before_header = self.writer.stream_position().map_err(Error::Io)?;

                // make space for the chunk header
                self.writer.write_all(&vec![0; std::mem::size_of::<DatBlockHeader>()]).map_err(Error::Io)?;
                total_written += std::mem::size_of::<DatBlockHeader>() as u64;

                // write the compressed chunk to the writer
                self.writer.write_all(&data).map_err(Error::Io)?;

                // calculate the size of the compressed data
                let after_data = self.writer.stream_position().map_err(Error::Io)?;
                let mut compressed_size = after_data - before_header;
                total_written += compressed_size;

                // seek back to before the header
                self.writer.seek(SeekFrom::Start(before_header)).map_err(Error::Io)?;

                // write the chunk header
                let header = DatBlockHeader {
                    size: std::mem::size_of::<DatBlockHeader>() as u32,
                    uncompressed_size: uncompressed_size as u32,
                    compressed_size: (compressed_size - std::mem::size_of::<DatBlockHeader>() as u64) as u32,
                    _unk_0: 0,
                };
                self.writer.write_le(&header).map_err(Error::BinRwWrite)?;

                // seek past the chunk
                self.writer.seek(SeekFrom::Start(after_data)).map_err(Error::Io)?;

                // pad to ALIGN bytes (inlined rather than calling self.align_to,
                // since the reader thread above still borrows part of self)
                let padded = {
                    let current_pos = self.writer.stream_position().map_err(Error::Io)? as usize;
                    let bytes_to_pad = ALIGN - (current_pos % ALIGN);
                    if bytes_to_pad > 0 {
                        // write the padding bytes
                        self.writer.write_all(&vec![0; bytes_to_pad]).map_err(Error::Io)?;
                    }

                    bytes_to_pad
                };

                // add the padding bytes to the compressed size, because that's
                // just how sqpack does it
                compressed_size += padded as u64;
                // total_written += padded as u64;

                infos.push(DatStdFileBlockInfos {
                    offset: offset as u32,
                    uncompressed_size: uncompressed_size as u16,
                    compressed_size: compressed_size as u16,
                });

                // a partial chunk means the end of the file was reached
                if uncompressed_size < Self::BLOCK_SIZE {
                    break;
                }

                // let the next chunk through
                let (_, lock) = &*self.current_chunk;
                *lock.lock().unwrap() += 1;
            }

            // at this point, we no longer care about receiving the finished
            // message, so drop the receiver. the scoped thread can hang waiting
            // to send the message if this isn't done
            drop(finished_rx);

            handle.join().unwrap();

            Ok(infos)
        }).unwrap();

        infos
    }
}