Mirror of https://github.com/xavo95/repak.git, synced 2025-02-22 15:53:45 +00:00
Allow compression to be driven by user

parent 5cfc8f52bd
commit b2bc86683d

3 changed files with 56 additions and 25 deletions
The first changed file (the crate's data module, given the `pub use data::PartialEntry` added below) makes PartialEntry public so it can appear in PakWriter's new public API:

@@ -7,7 +7,7 @@ use crate::{
 
 type Result<T, E = Error> = std::result::Result<T, E>;
 
-pub(crate) struct PartialEntry<D: AsRef<[u8]>> {
+pub struct PartialEntry<D: AsRef<[u8]>> {
     compression: Option<Compression>,
     compressed_size: u64,
     uncompressed_size: u64,
The crate root re-exports the type:

@@ -6,7 +6,7 @@ mod ext;
 mod footer;
 mod pak;
 
-pub use {error::*, pak::*};
+pub use {data::PartialEntry, error::*, pak::*};
 
 pub const MAGIC: u32 = 0x5A6F12E1;
The pak module picks up the re-exported type alongside its existing imports:

@@ -1,6 +1,6 @@
 use crate::data::build_partial_entry;
 use crate::entry::Entry;
-use crate::{Compression, Error};
+use crate::{Compression, Error, PartialEntry};
 
 use super::ext::{ReadExt, WriteExt};
 use super::{Version, VersionMajor};
Two new public methods on PakWriter separate building an entry from writing it:

@@ -292,6 +292,37 @@ impl<W: Write + Seek> PakWriter<W> {
         Ok(())
     }
 
+    pub fn entry_builder(&self) -> EntryBuilder {
+        EntryBuilder {
+            allowed_compression: self.allowed_compression.clone(),
+        }
+    }
+
+    pub fn write_entry<D: AsRef<[u8]>>(
+        &mut self,
+        path: String,
+        partial_entry: PartialEntry<D>,
+    ) -> Result<(), Error> {
+        let stream_position = self.writer.stream_position()?;
+
+        let entry = partial_entry.build_entry(
+            self.pak.version,
+            &mut self.pak.compression,
+            stream_position,
+        )?;
+
+        entry.write(
+            &mut self.writer,
+            self.pak.version,
+            crate::entry::EntryLocation::Data,
+        )?;
+
+        self.pak.index.add_entry(path, entry);
+        partial_entry.write_data(&mut self.writer)?;
+
+        Ok(())
+    }
+
     pub fn parallel<'scope, F, E>(&mut self, f: F) -> Result<&mut Self, E>
     where
         F: Send + Sync + FnOnce(ParallelPakWriter<'scope>) -> Result<(), E>,
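Taken together, these two methods let callers drive compression themselves: build a (possibly compressed) entry anywhere, then hand it to the writer. A minimal sequential sketch, assuming a PakWriter is already set up (the path and data here are illustrative, not from the commit):

    fn add_one<W: std::io::Write + std::io::Seek>(
        pak: &mut repak::PakWriter<W>,
        data: Vec<u8>,
    ) -> Result<(), repak::Error> {
        // Compress (or not) in memory, with no access to the writer.
        let partial = pak.entry_builder().build_entry(true, data)?;
        // Then append it: write_entry records the stream position, writes the
        // entry record, registers it in the index, and copies the data out.
        pak.write_entry("files/example.bin".to_string(), partial)
    }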
parallel now routes through the same pair of methods instead of duplicating the write logic inline:

@@ -299,38 +330,20 @@ impl<W: Write + Seek> PakWriter<W> {
     {
         use pariter::IteratorExt as _;
 
-        let allowed_compression = self.allowed_compression.as_slice();
-
         pariter::scope(|scope: &pariter::Scope<'_>| -> Result<(), E> {
             let (tx, rx) = std::sync::mpsc::sync_channel(0);
 
             let handle = scope.spawn(|_| f(ParallelPakWriter { tx }));
+            let entry_builder = self.entry_builder();
 
             let result = rx
                 .into_iter()
-                .parallel_map_scoped(scope, |(path, compress, data)| -> Result<_, Error> {
-                    let compression = compress.then_some(allowed_compression).unwrap_or_default();
-                    let partial_entry = build_partial_entry(compression, data)?;
-                    Ok((path, partial_entry))
+                .parallel_map_scoped(scope, move |(path, compress, data)| -> Result<_, Error> {
+                    Ok((path, entry_builder.build_entry(compress, data)?))
                 })
                 .try_for_each(|message| -> Result<(), Error> {
-                    let stream_position = self.writer.stream_position()?;
                     let (path, partial_entry) = message?;
-
-                    let entry = partial_entry.build_entry(
-                        self.pak.version,
-                        &mut self.pak.compression,
-                        stream_position,
-                    )?;
-
-                    entry.write(
-                        &mut self.writer,
-                        self.pak.version,
-                        crate::entry::EntryLocation::Data,
-                    )?;
-
-                    self.pak.index.add_entry(path, entry);
-                    partial_entry.write_data(&mut self.writer)?;
-                    Ok(())
+                    self.write_entry(path, partial_entry)
                 });
 
             if let Err(err) = handle.join().unwrap() {
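The refactor keeps all I/O on the consumer side of the channel: the cloned EntryBuilder owns its allowed-compression list, so the `move` closure no longer borrows self, worker threads only compress, and the single try_for_each consumer does every write through write_entry. A hypothetical caller, assuming ParallelPakWriter exposes a write_file method matching the (path, compress, data) tuples flowing through the channel (that method is not shown in this commit):

    fn pack_parallel<W: std::io::Write + std::io::Seek>(
        pak: &mut repak::PakWriter<W>,
    ) -> Result<(), repak::Error> {
        pak.parallel(|writer| {
            // Per-file compress flags: this bool is what entry_builder
            // ultimately turns into an allowed-compression slice.
            writer.write_file("a/one.bin".to_string(), true, b"hello world".to_vec())?;
            writer.write_file("a/two.bin".to_string(), false, b"already compressed".to_vec())?;
            Ok::<(), repak::Error>(())
        })?;
        Ok(())
    }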
Finally, the new EntryBuilder type itself. It is Clone and holds only the list of allowed codecs, so it can be handed around independently of the writer:

@@ -375,6 +388,24 @@ impl AsRef<[u8]> for Data<'_> {
     }
 }
 
+#[derive(Clone)]
+pub struct EntryBuilder {
+    allowed_compression: Vec<Compression>,
+}
+
+impl EntryBuilder {
+    /// Builds an entry in memory (compressed if requested) which must be written out later
+    pub fn build_entry<D: AsRef<[u8]> + Send + Sync>(
+        &self,
+        compress: bool,
+        data: D,
+    ) -> Result<PartialEntry<D>, Error> {
+        let compression = compress
+            .then_some(self.allowed_compression.as_slice())
+            .unwrap_or_default();
+        build_partial_entry(compression, data)
+    }
+}
+
 impl Pak {
     fn read<R: Read + Seek>(
         reader: &mut R,
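Because the builder is detached from the writer, users can drive compression with their own threading instead of going through PakWriter::parallel. A sketch using std::thread::scope, assuming EntryBuilder and PartialEntry are Send (the Send + Sync bounds on build_entry suggest this is the intended use); names here are illustrative:

    use std::thread;

    fn build_then_write<W: std::io::Write + std::io::Seek>(
        pak: &mut repak::PakWriter<W>,
        files: Vec<(String, Vec<u8>)>,
    ) -> Result<(), repak::Error> {
        let builder = pak.entry_builder();
        // Compress every file on its own scoped thread...
        let partials: Vec<_> = thread::scope(|s| {
            let handles: Vec<_> = files
                .into_iter()
                .map(|(path, data)| {
                    let builder = builder.clone();
                    s.spawn(move || builder.build_entry(true, data).map(|p| (path, p)))
                })
                .collect();
            handles.into_iter().map(|h| h.join().unwrap()).collect()
        });
        // ...then write them out in order on this thread.
        for result in partials {
            let (path, partial) = result?;
            pak.write_entry(path, partial)?;
        }
        Ok(())
    }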