repak (mirror of https://github.com/xavo95/repak.git)

Fix writing compressed encoded entries

commit 0e05acadb0
parent 8fddd3961f
2 changed files with 52 additions and 54 deletions
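What the hunks below change: in V10+ archives, the path hash index and the full directory index refer to entries by byte offset into the block of encoded entry records. The old writer assumed every encoded record was a fixed 12 bytes (flags + offset + size) and precomputed those offsets, but a record grows when the block size needs the 0x3f escape, when the offset or sizes need 64 bits, or when per-block sizes are emitted, so the precomputed offsets were wrong for any such entry. The fix serializes all encoded records into a buffer first, captures each record's real starting offset, and hands those offsets to both index generators. Along the way, Entry::block_uncompressed: Option<u32> becomes compression_block_size: u32, naming what the field actually holds.

For orientation, the low bits of the encoded flags word as the read side below decodes them (the remaining bits carry the compression method index and the 32-bit-safe flags for offset and sizes, per UE4's FPakEntry packing; that part is background, not shown in this diff):

    bits  0..=5   compression block size >> 11; 0x3f means an escaped full u32 follows
    bits  6..=21  compression block count
    bit   22      encrypted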
@@ -44,7 +44,7 @@ pub struct Entry {
     pub hash: Option<[u8; 20]>,
     pub blocks: Option<Vec<Block>>,
     pub encrypted: bool,
-    pub block_uncompressed: Option<u32>,
+    pub compression_block_size: u32,
 }
 
 impl Entry {
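Note on the field change above: the Option carried no information that survives serialization. Archives older than CompressionEncryption have no block-size field at all, which the read side (next hunk) already folded to a default, and the old write side emitted unwrap_or_default() anyway, so None and 0 were indistinguishable on disk. The new name also reflects that the value is the declared compression block size, not an uncompressed block size.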
@@ -112,10 +112,11 @@ impl Entry {
             },
             encrypted: version.version_major() >= VersionMajor::CompressionEncryption
                 && reader.read_bool()?,
-            block_uncompressed: match version.version_major() >= VersionMajor::CompressionEncryption
+            compression_block_size: match version.version_major()
+                >= VersionMajor::CompressionEncryption
             {
-                true => Some(reader.read_u32::<LE>()?),
-                false => None,
+                true => reader.read_u32::<LE>()?,
+                false => 0,
             },
         })
     }
@@ -127,7 +128,10 @@ impl Entry {
         location: EntryLocation,
     ) -> Result<(), super::Error> {
         if version >= super::Version::V10 && location == EntryLocation::Index {
-            let compression_block_size = self.block_uncompressed.unwrap_or_default();
+            let mut compression_block_size = (self.compression_block_size >> 11) & 0x3f;
+            if (compression_block_size << 11) != self.compression_block_size {
+                compression_block_size = 0x3f;
+            }
             let compression_blocks_count = if self.compression != Compression::None {
                 self.blocks.as_ref().unwrap().len() as u32
             } else {
@@ -147,6 +151,10 @@ impl Entry {
 
             writer.write_u32::<LE>(flags)?;
 
+            if compression_block_size == 0x3f {
+                writer.write_u32::<LE>(self.compression_block_size)?;
+            }
+
             if is_offset_32_bit_safe {
                 writer.write_u32::<LE>(self.offset as u32)?;
             } else {
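These two hunks make the write side mirror the read side of the format: the block size is stored right-shifted by 11 in the low 6 bits of flags, and a value that does not survive the shift round trip is marked with the 0x3f escape and then written out in full after the flags word. A minimal sketch of the rule (the helper name is illustrative, not the crate's API):

    /// Pack a compression block size into the 6-bit field of the flags word.
    /// Returns (six_bit_field, needs_trailing_u32). Illustrative helper only.
    fn pack_block_size(compression_block_size: u32) -> (u32, bool) {
        let packed = (compression_block_size >> 11) & 0x3f;
        if (packed << 11) != compression_block_size {
            (0x3f, true) // escaped: the real value follows the flags word
        } else {
            (packed, false)
        }
    }

    fn main() {
        // 64 KiB is a multiple of 2 KiB and fits in 6 bits, so it packs inline.
        assert_eq!(pack_block_size(0x10000), (0x20, false));
        // One byte more is not representable inline and takes the escape path.
        assert_eq!(pack_block_size(0x10001), (0x3f, true));
    }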
@@ -168,7 +176,7 @@ impl Entry {
 
             assert!(self.blocks.is_some());
             let blocks = self.blocks.as_ref().unwrap();
-            if blocks.len() > 1 || (blocks.len() == 1 && self.encrypted) {
+            if blocks.len() > 1 && !(blocks.len() == 1 && !self.encrypted) {
                 for b in blocks {
                     let block_size = b.end - b.start;
                     writer.write_u64::<LE>(block_size)?
@@ -211,7 +219,7 @@ impl Entry {
                 }
             }
             writer.write_bool(self.encrypted)?;
-            writer.write_u32::<LE>(self.block_uncompressed.unwrap_or_default())?;
+            writer.write_u32::<LE>(self.compression_block_size)?;
         }
 
         Ok(())
@@ -230,12 +238,12 @@ impl Entry {
 
         let encrypted = (bits & (1 << 22)) != 0;
         let compression_block_count: u32 = (bits >> 6) & 0xffff;
-        let mut block_uncompressed = bits & 0x3f;
+        let mut compression_block_size = bits & 0x3f;
 
-        if block_uncompressed == 0x3f {
-            block_uncompressed = reader.read_u32::<LE>()?;
+        if compression_block_size == 0x3f {
+            compression_block_size = reader.read_u32::<LE>()?;
         } else {
-            block_uncompressed <<= 11;
+            compression_block_size <<= 11;
         }
 
         let mut var_int = |bit: u32| -> Result<_, super::Error> {
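The rename above is the decode mirror of the write-side change: 0x3f in the low 6 bits means the true block size follows as a full u32, and any other value is multiplied back up by 2 KiB. A sketch of the counterpart to pack_block_size, with the reader abstracted away:

    // Illustrative counterpart to pack_block_size; next_u32 stands in for the
    // byteorder-based reader the crate uses.
    fn unpack_block_size(flags: u32, mut next_u32: impl FnMut() -> u32) -> u32 {
        match flags & 0x3f {
            0x3f => next_u32(), // escaped: the full value was written after the flags
            six => six << 11,   // inline: always a multiple of 2 KiB
        }
    }

With both sides in agreement, writing an entry and reading it back preserves compression_block_size byte for byte, which is what the commit title is about.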
@@ -253,14 +261,6 @@ impl Entry {
             _ => var_int(29)?,
         };
 
-        block_uncompressed = if compression_block_count == 0 {
-            0
-        } else if uncompressed < block_uncompressed.into() {
-            uncompressed.try_into().unwrap()
-        } else {
-            block_uncompressed
-        };
-
         let offset_base =
             match version.version_major() >= VersionMajor::RelativeChunkOffsets {
                 true => 0,
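The block deleted above used to clamp the decoded value to the entry's uncompressed size, i.e. it rewrote what was actually stored in the archive. Now that the writer re-encodes compression_block_size verbatim, escape and all, keeping that rewrite would have broken read-then-write round trips, which is presumably why it is dropped rather than moved.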
@@ -303,7 +303,7 @@ impl Entry {
             hash: None,
             blocks,
             encrypted,
-            block_uncompressed: Some(block_uncompressed),
+            compression_block_size,
         })
     }
 
@@ -162,7 +162,7 @@ impl<W: Write + Seek> PakWriter<W> {
             hash: Some(hasher.finalize().into()),
             blocks: None,
             encrypted: false,
-            block_uncompressed: None,
+            compression_block_size: 0,
         };
 
         entry.write(
@@ -345,6 +345,20 @@ impl Pak {
         index_writer.write_u32::<LE>(record_count)?;
         index_writer.write_u64::<LE>(path_hash_seed)?;
 
+        let (encoded_entries, offsets) = {
+            let mut offsets = Vec::with_capacity(self.index.entries.len());
+            let mut encoded_entries = io::Cursor::new(vec![]);
+            for entry in self.index.entries.values() {
+                offsets.push(encoded_entries.get_ref().len() as u32);
+                entry.write(
+                    &mut encoded_entries,
+                    self.version,
+                    super::entry::EntryLocation::Index,
+                )?;
+            }
+            (encoded_entries.into_inner(), offsets)
+        };
+
         // The index is organized sequentially as:
         // - Index Header, which contains:
         //   - Mount Point (u32 len + string w/ terminating byte)
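This new block is the core of the fix: every encoded record is serialized once into an in-memory cursor, and its starting offset within that buffer is captured before each write. The records are genuinely variable-length; a rough size formula with field widths taken from the entry hunks above (illustrative only; the crate simply measures the cursor rather than computing this):

    // Approximate byte size of one encoded entry record. sizes_are_32_bit is a
    // simplification: the real format flags each size separately.
    fn encoded_entry_size(
        escaped_block_size: bool, // the low 6 flag bits were 0x3f
        offset_is_32_bit: bool,
        sizes_are_32_bit: bool,
        compressed: bool,
        block_sizes_written: u32, // number of per-block u64 sizes emitted
    ) -> u32 {
        let mut n = 4; // flags word
        if escaped_block_size {
            n += 4; // full compression block size after the 0x3f escape
        }
        n += if offset_is_32_bit { 4 } else { 8 };
        n += if sizes_are_32_bit { 4 } else { 8 }; // uncompressed size
        if compressed {
            n += if sizes_are_32_bit { 4 } else { 8 }; // compressed size
            n += 8 * block_sizes_written; // one u64 per block, per the write hunk
        }
        n
    }

The minimum, 4 + 4 + 4 = 12 bytes, is exactly the old ENCODED_ENTRY_SIZE; every other shape is larger, which is how the precomputed index offsets went stale.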
@@ -377,11 +391,7 @@ impl Pak {
             size += 4; // has full directory index (since we're generating, always true)
             size += 8 + 8 + 20; // full directory index offset, size and hash
             size += 4; // encoded entry size
-            size += self.index.entries.len() as u64 * {
-                4 // flags
-                + 4 // offset
-                + 4 // size
-            };
+            size += encoded_entries.len() as u64;
             size += 4; // unused file count
             size
         };
@@ -390,13 +400,18 @@ impl Pak {
 
         let mut phi_buf = vec![];
         let mut phi_writer = io::Cursor::new(&mut phi_buf);
-        generate_path_hash_index(&mut phi_writer, path_hash_seed, &self.index.entries)?;
+        generate_path_hash_index(
+            &mut phi_writer,
+            path_hash_seed,
+            &self.index.entries,
+            &offsets,
+        )?;
 
         let full_directory_index_offset = path_hash_index_offset + phi_buf.len() as u64;
 
         let mut fdi_buf = vec![];
         let mut fdi_writer = io::Cursor::new(&mut fdi_buf);
-        generate_full_directory_index(&mut fdi_writer, &self.index.entries)?;
+        generate_full_directory_index(&mut fdi_writer, &self.index.entries, &offsets)?;
 
         index_writer.write_u32::<LE>(1)?; // we have path hash index
         index_writer.write_u64::<LE>(path_hash_index_offset)?;
@@ -408,16 +423,8 @@ impl Pak {
         index_writer.write_u64::<LE>(fdi_buf.len() as u64)?; // path hash index size
         index_writer.write_all(&hash(&fdi_buf))?;
 
-        let encoded_entries_size = self.index.entries.len() as u32 * ENCODED_ENTRY_SIZE;
-        index_writer.write_u32::<LE>(encoded_entries_size)?;
+        index_writer.write_u32::<LE>(encoded_entries.len() as u32)?;
+        index_writer.write_all(&encoded_entries)?;
 
-        for entry in self.index.entries.values() {
-            entry.write(
-                &mut index_writer,
-                self.version,
-                super::entry::EntryLocation::Index,
-            )?;
-        }
-
         index_writer.write_u32::<LE>(0)?;
 
@@ -459,28 +466,21 @@ fn hash(data: &[u8]) -> [u8; 20] {
     hasher.finalize().into()
 }
 
-const ENCODED_ENTRY_SIZE: u32 = {
-    4 // flags
-    + 4 // offset
-    + 4 // size
-};
-
 fn generate_path_hash_index<W: Write>(
     writer: &mut W,
     path_hash_seed: u64,
     entries: &BTreeMap<String, super::entry::Entry>,
+    offsets: &Vec<u32>,
 ) -> Result<(), super::Error> {
     writer.write_u32::<LE>(entries.len() as u32)?;
-    let mut offset = 0u32;
-    for path in entries.keys() {
+    for (path, offset) in entries.keys().zip(offsets) {
         let utf16le_path = path
             .encode_utf16()
             .flat_map(|c| c.to_le_bytes())
             .collect::<Vec<_>>();
         let path_hash = fnv64(&utf16le_path, path_hash_seed);
         writer.write_u64::<LE>(path_hash)?;
-        writer.write_u32::<LE>(offset)?;
-        offset += ENCODED_ENTRY_SIZE;
+        writer.write_u32::<LE>(*offset as u32)?;
     }
 
     writer.write_u32::<LE>(0)?;
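generate_path_hash_index now takes the measured offsets and zips them against the sorted path keys, so each record it emits is the u64 hash of the UTF-16LE path followed by the u32 offset of that path's encoded record instead of a fixed-stride guess. For reference, an FNV-1a-style fnv64 consistent with the signature in the hunk header below (a sketch; the crate's exact seeding may differ):

    // 64-bit FNV-1a with the caller's seed folded into the offset basis.
    fn fnv64(data: &[u8], offset: u64) -> u64 {
        const OFFSET_BASIS: u64 = 0xcbf2_9ce4_8422_2325;
        const PRIME: u64 = 0x0000_0100_0000_01b3;
        let mut hash = OFFSET_BASIS.wrapping_add(offset);
        for &b in data {
            hash ^= u64::from(b);
            hash = hash.wrapping_mul(PRIME);
        }
        hash
    }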
@@ -502,10 +502,10 @@ fn fnv64(data: &[u8], offset: u64) -> u64 {
 fn generate_full_directory_index<W: Write>(
     writer: &mut W,
     entries: &BTreeMap<String, super::entry::Entry>,
+    offsets: &Vec<u32>,
 ) -> Result<(), super::Error> {
-    let mut offset = 0u32;
     let mut fdi = BTreeMap::new();
-    for path in entries.keys() {
+    for (path, offset) in entries.keys().zip(offsets) {
         let (directory, filename) = {
             let i = path.rfind('/').map(|i| i + 1); // we want to include the slash on the directory
             match i {
@@ -519,15 +519,13 @@ fn generate_full_directory_index<W: Write>(
 
         fdi.entry(directory)
             .and_modify(|d: &mut BTreeMap<String, u32>| {
-                d.insert(filename.clone(), offset);
+                d.insert(filename.clone(), *offset);
             })
             .or_insert_with(|| {
                 let mut files_and_offsets = BTreeMap::new();
-                files_and_offsets.insert(filename.clone(), offset);
+                files_and_offsets.insert(filename.clone(), *offset);
                 files_and_offsets
             });
-
-        offset += ENCODED_ENTRY_SIZE;
     }
 
     writer.write_u32::<LE>(fdi.len() as u32)?;
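Both index generators rely on the same alignment invariant: offsets[i] was recorded while encoding the i-th value of self.index.entries, and BTreeMap iteration order is stable, so zipping entries.keys() with offsets pairs every path with the byte offset of its own encoded record. A toy illustration, with byte strings standing in for encoded entries:

    use std::collections::BTreeMap;

    fn main() {
        let entries: BTreeMap<&str, &[u8]> =
            BTreeMap::from([("a/x.uasset", &b"xx"[..]), ("a/y.uasset", &b"yyyy"[..])]);
        let mut blob = Vec::new();
        let mut offsets = Vec::new();
        for encoded in entries.values() {
            offsets.push(blob.len() as u32); // record the offset before writing
            blob.extend_from_slice(encoded);
        }
        for (path, offset) in entries.keys().zip(&offsets) {
            // prints: a/x.uasset -> 0, then a/y.uasset -> 2
            println!("{path} -> encoded record at byte {offset}");
        }
    }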