correctly update file size after write

parent ea3e2a76c4
commit 2ed107478e

11 changed files with 451 additions and 272 deletions
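
The gist of the change: a DirEntry now remembers the byte offset it was loaded from, so after writing file data a caller can bump the cached size and serialize the 32-byte entry back in place. A minimal sketch of that flow from the caller's side, assuming the crate is consumed as fat_bits with the exports shown in this diff (the helper name is illustrative, not part of the commit):

use fat_bits::FatFs;         // `pub struct FatFs` in lib.rs
use fat_bits::dir::DirEntry; // `pub mod dir` / `pub struct DirEntry`

// After writing to a file, keep its directory entry in sync on disk.
fn sync_dir_entry(fat_fs: &FatFs, entry: &mut DirEntry, new_size: u32) -> std::io::Result<()> {
    // update the cached size in the in-memory entry ...
    entry.update_file_size(new_size);
    // ... then write the 32-byte entry back to its stored offset
    entry.update(fat_fs)
}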

fat-bits/src/dir.rs

@@ -5,7 +5,10 @@ use bitflags::bitflags;
 use chrono::{NaiveDate, NaiveDateTime, TimeDelta};
 use compact_str::CompactString;
 
+use crate::FatFs;
 use crate::datetime::{Date, Time};
+use crate::iter::ClusterChainReader;
+use crate::subslice::SubSliceMut;
 use crate::utils::{load_u16_le, load_u32_le};
 
 bitflags! {
@@ -67,6 +70,8 @@ pub struct DirEntry {
 
     checksum: u8,
     long_name: Option<CompactString>,
+
+    offset: u64,
 }
 
 impl Display for DirEntry {
@@ -141,7 +146,7 @@ impl DirEntry {
         name
     }
 
-    pub fn load(bytes: &[u8]) -> anyhow::Result<DirEntry> {
+    pub fn load(bytes: &[u8], offset: u64) -> anyhow::Result<DirEntry> {
         assert_eq!(bytes.len(), 32);
 
         let attr = Attr::from_bits_truncate(bytes[11]);
@@ -196,12 +201,16 @@ impl DirEntry {
             file_size,
             long_name: None,
             checksum: Self::checksum(&bytes[..11]),
+            offset,
         })
     }
 
-    pub fn write(&self, mut writer: impl Write) -> std::io::Result<()> {
+    fn write(&self, mut writer: impl Write) -> std::io::Result<()> {
         let mut buf = [0; 32];
 
         // fill name with 0x20
         buf[..11].copy_from_slice(&[0x20; 11]);
 
         let mut name = self.name();
 
         if name[0] == b'.' && self.is_hidden() {
@@ -246,11 +255,21 @@ impl DirEntry {
 
         buf[28..].copy_from_slice(&self.file_size.to_le_bytes());
 
+        eprintln!("writing new dir entry: {:?}", buf);
+
         writer.write_all(&buf)?;
 
         Ok(())
     }
 
+    /// write this DirEntry back to the underlying data
+    pub fn update(&self, fat_fs: &FatFs) -> std::io::Result<()> {
+        eprintln!("making new SubSliceMut at offset {:#X}", self.offset);
+        let sub_slice = SubSliceMut::new(fat_fs.inner.clone(), self.offset, 32);
+
+        self.write(sub_slice)
+    }
+
     /// indicates this DirEntry is empty
     ///
     /// can be either simply empty (0xe5) or the sentinel (0x00) that indicates that all following
@@ -344,25 +363,6 @@ impl DirEntry {
             s.push('.');
         }
 
-        // s += name;
-
-        // for &c in self.name[..8].trim_ascii_end() {
-        //     // stem
-
-        //     if !c.is_ascii()
-        //         || c < 0x20
-        //         || !(c.is_ascii_alphanumeric() || VALID_SYMBOLS.contains(&c))
-        //     {
-        //         // replace invalid character
-        //         // characters above 127 are also ignored, even tho allowed
-        //         s.push('?');
-
-        //         continue;
-        //     }
-
-        //     s.push(c as char);
-        // }
-
         const VALID_SYMBOLS: &[u8] = &[
             b'$', b'%', b'\'', b'-', b'_', b'@', b'~', b'`', b'!', b'(', b')', b'{', b'}', b'^',
             b'#', b'&',
@@ -432,6 +432,10 @@ impl DirEntry {
         self.file_size
     }
 
+    pub fn update_file_size(&mut self, file_size: u32) {
+        self.file_size = file_size
+    }
+
     pub fn checksum(name: &[u8]) -> u8 {
         let mut checksum: u8 = 0;
 
@@ -526,7 +530,7 @@ enum DirEntryWrapper {
 }
 
 impl DirEntryWrapper {
-    pub fn load(bytes: &[u8]) -> anyhow::Result<DirEntryWrapper> {
+    pub fn load(bytes: &[u8], offset: u64) -> anyhow::Result<DirEntryWrapper> {
         assert_eq!(bytes.len(), 32);
 
         let attr = Attr::from_bits_truncate(bytes[11]);
@@ -534,7 +538,7 @@ impl DirEntryWrapper {
         let dir_entry = if attr == Attr::LongName {
             DirEntryWrapper::LongName(LongNameDirEntry::load(bytes)?)
         } else {
-            DirEntryWrapper::Regular(DirEntry::load(bytes)?)
+            DirEntryWrapper::Regular(DirEntry::load(bytes, offset)?)
         };
 
         Ok(dir_entry)
@@ -642,14 +646,14 @@ impl LongFilenameBuf {
     }
 }
 
-pub struct DirIter<R: Read> {
-    reader: R,
+pub struct DirIter<'a> {
+    reader: ClusterChainReader<'a>,
 
     long_filename_buf: LongFilenameBuf,
 }
 
-impl<R: Read> DirIter<R> {
-    pub fn new(reader: R) -> DirIter<R> {
+impl<'a> DirIter<'a> {
+    pub fn new(reader: ClusterChainReader<'a>) -> Self {
         DirIter {
             reader,
             long_filename_buf: Default::default(),
@@ -658,13 +662,16 @@ impl<R: Read> DirIter<R> {
 
     /// inner function for iterator
     fn next_impl(&mut self) -> anyhow::Result<Option<DirEntry>> {
+        let offset = self.reader.current_offset();
+
         let mut chunk = [0; 32];
 
         if self.reader.read_exact(&mut chunk).is_err() {
             // nothing we can do here since we might be in an invalid state after a partial read
-            return Ok(None);
+            anyhow::bail!("read failed");
         }
 
-        let dir_entry = DirEntryWrapper::load(&chunk)
+        let dir_entry = DirEntryWrapper::load(&chunk, offset)
             .map_err(|e| anyhow::anyhow!("failed to load dir entry: {e}"))?;
 
         let mut dir_entry = match dir_entry {
@@ -712,7 +719,7 @@ impl<R: Read> DirIter<R> {
     }
 }
 
-impl<R: Read> Iterator for DirIter<R> {
+impl Iterator for DirIter<'_> {
     type Item = DirEntry;
 
     fn next(&mut self) -> Option<Self::Item> {
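
For reference, the size that write() serializes sits in the last four bytes of the 32-byte directory entry, little-endian. A self-contained round trip of just that field (plain std, independent of the crate):

fn store_file_size(entry: &mut [u8; 32], file_size: u32) {
    // bytes 28..32 of a FAT directory entry hold the file size, little-endian
    entry[28..].copy_from_slice(&file_size.to_le_bytes());
}

fn load_file_size(entry: &[u8; 32]) -> u32 {
    u32::from_le_bytes(entry[28..].try_into().unwrap())
}

fn main() {
    let mut raw = [0u8; 32];
    store_file_size(&mut raw, 4096);
    assert_eq!(load_file_size(&raw), 4096);
}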

fat-bits/src/fat.rs

@@ -27,17 +27,23 @@ pub trait FatOps {
     fn get_entry(&self, cluster: u32) -> u32;
     fn set_entry(&mut self, cluster: u32, entry: u32);
 
-    fn valid_clusters(&self) -> RangeInclusive<u32>;
-    fn reserved_clusters(&self) -> RangeInclusive<u32>;
-    fn defective_cluster(&self) -> u32;
-    fn reserved_eof_clusters(&self) -> RangeInclusive<u32>;
-    fn eof_cluster(&self) -> u32;
+    fn valid_entries(&self) -> RangeInclusive<u32>;
+    fn reserved_entries(&self) -> RangeInclusive<u32>;
+    fn defective_entry(&self) -> u32;
+    fn reserved_eof_entries(&self) -> RangeInclusive<u32>;
+    fn eof_entry(&self) -> u32;
 
-    fn count_free_clusters(&self) -> usize {
-        self.valid_clusters()
+    fn count_free_clusters(&self) -> u32 {
+        self.valid_entries()
             .map(|cluster| self.get_entry(cluster))
             .filter(|&entry| entry == 0)
-            .count()
+            .count() as u32
     }
 
+    fn first_free_cluster(&self) -> Option<u32> {
+        self.valid_entries()
+            .map(|cluster| self.get_entry(cluster))
+            .find(|&entry| entry == 0)
+    }
+
     fn write_to_disk(&self, sub_slice: SubSliceMut) -> std::io::Result<()>;
@@ -75,18 +81,18 @@ impl Fat {
             return Err(FatError::FreeCluster);
         }
 
-        if self.reserved_clusters().contains(&cluster) {
+        if self.reserved_entries().contains(&cluster) {
             // can't get next cluster for reserved cluster
             return Err(FatError::ReservedCluster(cluster));
         }
 
         // defective cluster
-        if cluster == self.defective_cluster() {
+        if cluster == self.defective_entry() {
             // can't get next cluster for defective cluster
             return Err(FatError::DefectiveCluster);
         }
 
-        if self.reserved_eof_clusters().contains(&cluster) {
+        if self.reserved_eof_entries().contains(&cluster) {
             // Reserved and should not be used. May be interpreted as an allocated cluster and the
             // final cluster in the file (indicating end-of-file condition).
             //
@@ -99,12 +105,12 @@ impl Fat {
         let entry = self.get_entry(cluster);
 
         // interpret second reserved block as EOF here
-        if entry == self.eof_cluster() || self.reserved_eof_clusters().contains(&entry) {
+        if entry == self.eof_entry() || self.reserved_eof_entries().contains(&entry) {
             return Ok(None);
         }
 
         // entry should be in the valid cluster range here; otherwise something went wrong
-        if !self.valid_clusters().contains(&entry) {
+        if !self.valid_entries().contains(&entry) {
             return Err(FatError::InvalidEntry(entry));
         }
 
@@ -195,23 +201,23 @@ impl FatOps for Fat12 {
         self.next_sectors[cluster as usize] = entry as u16;
     }
 
-    fn valid_clusters(&self) -> RangeInclusive<u32> {
+    fn valid_entries(&self) -> RangeInclusive<u32> {
         2..=self.max
     }
 
-    fn reserved_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_entries(&self) -> RangeInclusive<u32> {
         (self.max as u32 + 1)..=0xFF6
     }
 
-    fn defective_cluster(&self) -> u32 {
+    fn defective_entry(&self) -> u32 {
         0xFF7
     }
 
-    fn reserved_eof_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_eof_entries(&self) -> RangeInclusive<u32> {
         0xFF8..=0xFFE
     }
 
-    fn eof_cluster(&self) -> u32 {
+    fn eof_entry(&self) -> u32 {
         0xFFF
     }
 
@@ -322,23 +328,23 @@ impl FatOps for Fat16 {
         self.next_sectors[cluster as usize] = entry as u16;
     }
 
-    fn valid_clusters(&self) -> RangeInclusive<u32> {
+    fn valid_entries(&self) -> RangeInclusive<u32> {
         2..=self.max
     }
 
-    fn reserved_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_entries(&self) -> RangeInclusive<u32> {
         (self.max as u32 + 1)..=0xFFF6
     }
 
-    fn defective_cluster(&self) -> u32 {
+    fn defective_entry(&self) -> u32 {
         0xFFF7
     }
 
-    fn reserved_eof_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_eof_entries(&self) -> RangeInclusive<u32> {
         0xFFF8..=0xFFFE
     }
 
-    fn eof_cluster(&self) -> u32 {
+    fn eof_entry(&self) -> u32 {
         0xFFFF
     }
 
@@ -420,23 +426,23 @@ impl FatOps for Fat32 {
         self.next_sectors[cluster as usize] = entry;
     }
 
-    fn valid_clusters(&self) -> RangeInclusive<u32> {
+    fn valid_entries(&self) -> RangeInclusive<u32> {
         2..=self.max
    }
 
-    fn reserved_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_entries(&self) -> RangeInclusive<u32> {
         (self.max + 1)..=0xFFFFFFF6
     }
 
-    fn defective_cluster(&self) -> u32 {
+    fn defective_entry(&self) -> u32 {
         0xFFFFFFF7
     }
 
-    fn reserved_eof_clusters(&self) -> RangeInclusive<u32> {
+    fn reserved_eof_entries(&self) -> RangeInclusive<u32> {
         0xFFFFFFF8..=0xFFFFFFFE
     }
 
-    fn eof_cluster(&self) -> u32 {
+    fn eof_entry(&self) -> u32 {
         0xFFFFFFFF
     }
 
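
For orientation, the renamed *_entries methods describe the standard FAT16 value ranges: 0 is free, 2..=max are chain links, 0xFFF7 is defective, 0xFFF8..=0xFFFF is treated as end-of-chain (matching the code's own "interpret second reserved block as EOF" choice), and everything else is reserved. A small standalone classifier over those same constants, written purely as an illustration and not taken from the crate:

#[derive(Debug, PartialEq)]
enum Fat16Entry {
    Free,
    Next(u32), // points at the next cluster in the chain
    Reserved,
    Defective,
    EndOfChain,
}

fn classify_fat16_entry(entry: u32, max_valid: u32) -> Fat16Entry {
    match entry {
        0 => Fat16Entry::Free,
        e if (2..=max_valid).contains(&e) => Fat16Entry::Next(e),
        0xFFF7 => Fat16Entry::Defective,
        e if (0xFFF8..=0xFFFF).contains(&e) => Fat16Entry::EndOfChain,
        _ => Fat16Entry::Reserved,
    }
}

fn main() {
    assert_eq!(classify_fat16_entry(0, 0xFF00), Fat16Entry::Free);
    assert_eq!(classify_fat16_entry(0x0003, 0xFF00), Fat16Entry::Next(3));
    assert_eq!(classify_fat16_entry(0xFFFF, 0xFF00), Fat16Entry::EndOfChain);
}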

fat-bits/src/fs_info.rs

@@ -40,4 +40,12 @@ impl FsInfo {
             next_free,
         })
     }
+
+    pub fn free_count(&self) -> u32 {
+        self.free_count
+    }
+
+    pub fn next_free(&self) -> Option<u32> {
+        Some(self.next_free)
+    }
 }

fat-bits/src/iter.rs

@@ -1,7 +1,7 @@
 use std::io::{Read, Write};
 
-use crate::FatFs;
 use crate::subslice::{SubSlice, SubSliceMut};
+use crate::{FatFs, FatType};
 
 pub struct ClusterChainReader<'a> {
     fat_fs: &'a FatFs,
@@ -12,7 +12,7 @@ pub struct ClusterChainReader<'a> {
 }
 
 impl<'a> ClusterChainReader<'a> {
-    pub fn new(fat_fs: &'a FatFs, first_cluster: u32) -> ClusterChainReader<'a> {
+    pub fn new(fat_fs: &'a FatFs, first_cluster: u32) -> Self {
         let next_cluster = fat_fs.next_cluster(first_cluster).unwrap_or(None);
 
         let sub_slice = fat_fs.cluster_as_subslice(first_cluster);
@@ -24,6 +24,28 @@ impl<'a> ClusterChainReader<'a> {
         }
     }
 
+    pub fn root_dir_reader(fat_fs: &'a FatFs) -> Self {
+        match fat_fs.fat_type() {
+            FatType::Fat12 | FatType::Fat16 => {
+                // fixed root dir, so no need to chain
+                // get a single SubSlice for it and next_cluster is None
+
+                let sub_slice = fat_fs.root_dir_as_subslice();
+
+                ClusterChainReader {
+                    fat_fs,
+                    sub_slice,
+                    next_cluster: None,
+                }
+            }
+            FatType::Fat32 => {
+                // FAT is directory_like, so get a real chain reader
+
+                Self::new(fat_fs, fat_fs.bpb.root_cluster().unwrap())
+            }
+        }
+    }
+
     fn move_to_next_cluster(&mut self) -> bool {
         let Some(next_cluster) = self.next_cluster else {
             return false;
@@ -55,6 +77,10 @@ impl<'a> ClusterChainReader<'a> {
 
         n
     }
+
+    pub fn current_offset(&self) -> u64 {
+        self.sub_slice.offset()
+    }
 }
 
 impl Read for ClusterChainReader<'_> {
@@ -78,7 +104,7 @@ pub struct ClusterChainWriter<'a> {
 }
 
 impl<'a> ClusterChainWriter<'a> {
-    pub fn new(fat_fs: &'a FatFs, first_cluster: u32) -> ClusterChainWriter<'a> {
+    pub fn new(fat_fs: &'a FatFs, first_cluster: u32) -> Self {
         let next_cluster = fat_fs.next_cluster(first_cluster).unwrap_or(None);
 
         let sub_slice = fat_fs.cluster_as_subslice_mut(first_cluster);
@@ -90,6 +116,28 @@ impl<'a> ClusterChainWriter<'a> {
         }
     }
 
+    pub fn root_dir_writer(fat_fs: &'a FatFs) -> Self {
+        match fat_fs.fat_type() {
+            FatType::Fat12 | FatType::Fat16 => {
+                // fixed root dir, so no need to chain
+                // get a single SubSliceMut for it and next_cluster is None
+
+                let sub_slice = fat_fs.root_dir_as_subslice_mut();
+
+                ClusterChainWriter {
+                    fat_fs,
+                    sub_slice,
+                    next_cluster: None,
+                }
+            }
+            FatType::Fat32 => {
+                // FAT is directory_like, so get a real chain writer
+
+                Self::new(fat_fs, fat_fs.bpb.root_cluster().unwrap())
+            }
+        }
+    }
+
     fn move_to_next_cluster(&mut self) -> bool {
         // TODO: should allocate a new cluster here!
         let Some(next_cluster) = self.next_cluster else {
@@ -122,6 +170,10 @@ impl<'a> ClusterChainWriter<'a> {
 
         n
     }
+
+    pub fn current_offset(&self) -> u64 {
+        self.sub_slice.offset()
+    }
 }
 
 impl Write for ClusterChainWriter<'_> {
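
With root_dir_reader in place, listing the root directory is the same call for all three FAT variants. Roughly, from a caller's point of view (hypothetical usage; it assumes the crate is consumed as fat_bits and that the file_size() getter whose tail shows up in the dir.rs hunk above exists):

use fat_bits::FatFs; // assumed crate name / export

fn list_root(fat_fs: &FatFs) {
    // root_dir_iter() now builds a ClusterChainReader via root_dir_reader(),
    // so FAT12/16 (fixed root dir) and FAT32 (cluster chain) share one iterator type.
    for entry in fat_fs.root_dir_iter() {
        // DirEntry implements Display, per the dir.rs context above
        println!("{} ({} bytes)", entry, entry.file_size());
    }
}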

fat-bits/src/lib.rs

@@ -1,10 +1,11 @@
 use std::cell::RefCell;
 use std::fmt::Display;
-use std::io::{Read, Seek, SeekFrom, Write};
 use std::rc::Rc;
 
 use crate::dir::DirIter;
 use crate::fat::{FatError, FatOps};
+use crate::iter::ClusterChainReader;
+pub use crate::slice_like::SliceLike;
 use crate::subslice::{SubSlice, SubSliceMut};
 
 pub mod bpb;
@@ -13,6 +14,7 @@ pub mod dir;
 pub mod fat;
 pub mod fs_info;
 pub mod iter;
+mod slice_like;
 mod subslice;
 mod utils;
 
@@ -23,80 +25,24 @@ pub enum FatType {
     Fat32,
 }
 
-pub trait SliceLike {
-    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()>;
-
-    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()>;
-}
-
-impl SliceLike for &mut [u8] {
-    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()> {
-        if offset as usize + buf.len() > self.len() {
-            return Err(std::io::Error::other(anyhow::anyhow!(
-                "reading {} bytes at offset {} is out of bounds for slice of len {}",
-                buf.len(),
-                offset,
-                self.len()
-            )));
-        }
-
-        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
-
-        Ok(())
-    }
-
-    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()> {
-        if offset as usize + bytes.len() > self.len() {
-            return Err(std::io::Error::other(anyhow::anyhow!(
-                "writing {} bytes at offset {} is out of bounds for slice of len {}",
-                bytes.len(),
-                offset,
-                self.len()
-            )));
-        }
-
-        self[offset as usize..][..bytes.len()].copy_from_slice(bytes);
-
-        Ok(())
-    }
-}
-
-impl SliceLike for std::fs::File {
-    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()> {
-        self.seek(SeekFrom::Start(offset))?;
-
-        self.read_exact(buf)?;
-
-        Ok(())
-    }
-
-    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()> {
-        self.seek(SeekFrom::Start(offset))?;
-
-        self.write_all(bytes)?;
-
-        Ok(())
-    }
-}
-
 #[allow(dead_code)]
 pub struct FatFs {
     inner: Rc<RefCell<dyn SliceLike>>,
 
-    fat_offset: u64,
-    fat_size: usize,
-
+    // fat_offset: u64,
+    // fat_size: usize,
     root_dir_offset: Option<u64>,
     root_dir_size: usize,
 
     pub data_offset: u64,
-    data_size: usize,
-
+    // data_size: usize,
     bytes_per_cluster: usize,
 
     bpb: bpb::Bpb,
 
     fat: fat::Fat,
+
+    next_free: Option<u32>,
+    free_count: u32,
 }
 
 impl Display for FatFs {
@@ -146,42 +92,93 @@ impl FatFs {
         // }
         // }
 
-        let fat_offset = bpb.fat_offset();
-        let fat_size = bpb.fat_len_bytes();
+        // let fat_offset = bpb.fat_offset();
+        // let fat_size = bpb.fat_len_bytes();
 
         let root_dir_offset = bpb.root_directory_offset();
         let root_dir_size = bpb.root_dir_len_bytes();
 
         let data_offset = bpb.data_offset();
-        let data_size = bpb.data_len_bytes();
+        // let data_size = bpb.data_len_bytes();
 
         let bytes_per_cluster = bpb.bytes_per_cluster();
 
+        let next_free = fat.first_free_cluster();
+        let free_count = fat.count_free_clusters();
+
         Ok(FatFs {
             inner: data,
-            fat_offset,
-            fat_size,
+            // fat_offset,
+            // fat_size,
             root_dir_offset,
             root_dir_size,
             data_offset,
-            data_size,
+            // data_size,
             bytes_per_cluster,
             bpb,
             fat,
+            next_free,
+            free_count,
         })
     }
 
+    pub fn fat_type(&self) -> FatType {
+        match &self.fat {
+            fat::Fat::Fat12(_) => FatType::Fat12,
+            fat::Fat::Fat16(_) => FatType::Fat16,
+            fat::Fat::Fat32(_) => FatType::Fat32,
+        }
+    }
+
     /// byte offset of data cluster
     fn data_cluster_to_offset(&self, cluster: u32) -> u64 {
         // assert!(cluster >= 2);
 
-        assert!(self.fat.valid_clusters().contains(&cluster));
+        assert!(self.fat.valid_entries().contains(&cluster));
 
         self.data_offset + (cluster - 2) as u64 * self.bytes_per_cluster as u64
     }
 
-    pub fn free_clusters(&self) -> usize {
-        self.fat.count_free_clusters()
+    pub fn free_clusters(&self) -> u32 {
+        // self.fat.count_free_clusters()
+        self.free_count
     }
 
+    pub fn alloc_cluster(&mut self) -> Option<u32> {
+        let Some(cluster) = self.next_free else {
+            // no free cluster
+            return None;
+        };
+
+        // set cluster as taken
+        self.fat.set_entry(cluster, self.fat.eof_entry());
+
+        // something went terribly wrong
+        assert_ne!(self.free_count, 0);
+
+        self.free_count -= 1;
+
+        // find next free cluster
+        self.next_free = self.fat.first_free_cluster();
+
+        Some(cluster)
+    }
+
+    pub fn dealloc_cluster(&mut self, cluster: u32) {
+        // assert cluster is actually valid
+        assert!(
+            self.fat
+                .valid_entries()
+                .contains(&self.fat.get_entry(cluster))
+        );
+
+        self.fat.set_entry(cluster, 0);
+
+        if self.next_free.is_none() || self.next_free.unwrap() > cluster {
+            self.next_free = Some(cluster);
+        }
+
+        self.free_count += 1;
+    }
+
     pub fn bytes_per_sector(&self) -> u16 {
@@ -203,13 +200,27 @@ impl FatFs {
         self.fat.get_next_cluster(cluster)
     }
 
+    pub fn cluster_as_subslice(&self, cluster: u32) -> SubSlice {
+        if cluster == 0 {
+            // for cluster 0 simply return empty subslice
+            // this makes things a bit easier, since cluster 0 is used as a marker that a file/dir
+            // is empty
+
+            return SubSlice::new(self.inner.clone(), 0, 0);
+        }
+
+        let offset = self.data_cluster_to_offset(cluster);
+
+        SubSlice::new(self.inner.clone(), offset, self.bytes_per_cluster)
+    }
+
     pub fn cluster_as_subslice_mut(&self, cluster: u32) -> SubSliceMut {
         if cluster == 0 {
             // for cluster 0 simply return empty subslice
             // this makes things a bit easier, since cluster 0 is used as a marker that a file/dir
             // is empty
 
-            SubSliceMut::new(self.inner.clone(), 0, 0);
+            return SubSliceMut::new(self.inner.clone(), 0, 0);
         }
 
         let offset = self.data_cluster_to_offset(cluster);
@@ -217,18 +228,12 @@ impl FatFs {
         SubSliceMut::new(self.inner.clone(), offset, self.bytes_per_cluster)
     }
 
-    pub fn cluster_as_subslice(&self, cluster: u32) -> SubSlice {
-        if cluster == 0 {
-            // for cluster 0 simply return empty subslice
-            // this makes things a bit easier, since cluster 0 is used as a marker that a file/dir
-            // is empty
-
-            SubSlice::new(self.inner.clone(), 0, 0);
-        }
-
-        let offset = self.data_cluster_to_offset(cluster);
-
-        SubSlice::new(self.inner.clone(), offset, self.bytes_per_cluster)
+    fn root_dir_as_subslice(&self) -> SubSlice {
+        SubSlice::new(self.inner.clone(), self.root_dir_offset.unwrap(), self.root_dir_size)
+    }
+
+    fn root_dir_as_subslice_mut(&self) -> SubSliceMut {
+        SubSliceMut::new(self.inner.clone(), self.root_dir_offset.unwrap(), self.root_dir_size)
     }
 
     fn chain_reader(&'_ self, first_cluster: u32) -> iter::ClusterChainReader<'_> {
@@ -239,35 +244,16 @@ impl FatFs {
         iter::ClusterChainWriter::new(self, first_cluster)
     }
 
-    pub fn root_dir_iter<'a>(&'a self) -> DirIter<Box<dyn Read + 'a>> {
-        // Box<dyn Iterator<Item = DirEntry> + '_>
-        // TODO: maybe wrap this in another RootDirIter enum, so we don't have to Box<dyn>
+    pub fn root_dir_iter<'a>(&self) -> DirIter<'_> {
+        let reader = ClusterChainReader::root_dir_reader(self);
 
-        if let Some(root_dir_offset) = self.root_dir_offset {
-            // FAT12/FAT16
-
-            let sub_slice = SubSlice::new(self.inner.clone(), root_dir_offset, self.root_dir_size);
-
-            return DirIter::new(Box::new(sub_slice));
-        }
-
-        // FAT32
-
-        // can't fail; we're in the FAT32 case
-        let root_cluster = self.bpb.root_cluster().unwrap();
-
-        let cluster_iter = iter::ClusterChainReader::new(self, root_cluster);
-
-        DirIter::new(Box::new(cluster_iter))
+        DirIter::new(reader)
     }
 
-    pub fn dir_iter<'a>(&'a self, first_cluster: u32) -> DirIter<Box<dyn Read + 'a>> {
-        // TODO: return type must match root_dir_iter
-        // if the Box<dyn> is changed there, update here as well
-
+    pub fn dir_iter<'a>(&self, first_cluster: u32) -> DirIter<'_> {
         let cluster_iter = self.chain_reader(first_cluster);
 
-        DirIter::new(Box::new(cluster_iter))
+        DirIter::new(cluster_iter)
     }
 
     pub fn file_reader(&self, first_cluster: u32) -> iter::ClusterChainReader<'_> {
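
The new allocator bookkeeping keeps next_free and free_count in step with the FAT table itself. The invariant is easier to see in a toy model (a plain Vec standing in for the FAT; this is an illustration, not the crate's code):

// Toy model: entry 0 = free, anything else = allocated.
struct ToyFat {
    entries: Vec<u32>,
    next_free: Option<u32>,
    free_count: u32,
}

impl ToyFat {
    fn alloc(&mut self) -> Option<u32> {
        let cluster = self.next_free?;
        self.entries[cluster as usize] = 0xFFFF; // mark as end-of-chain, i.e. taken
        assert_ne!(self.free_count, 0);
        self.free_count -= 1;
        // rescan for the lowest free slot
        self.next_free = self.entries.iter().position(|&e| e == 0).map(|i| i as u32);
        Some(cluster)
    }

    fn dealloc(&mut self, cluster: u32) {
        self.entries[cluster as usize] = 0;
        // a freed cluster below the cached hint becomes the new hint
        if self.next_free.map_or(true, |n| n > cluster) {
            self.next_free = Some(cluster);
        }
        self.free_count += 1;
    }
}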

fat-bits/src/slice_like.rs (new file, 58 lines)

@@ -0,0 +1,58 @@
+use std::fs::File;
+use std::io::{Read as _, Seek as _, SeekFrom, Write as _};
+
+pub trait SliceLike {
+    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()>;
+
+    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()>;
+}
+
+impl SliceLike for &mut [u8] {
+    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()> {
+        if offset as usize + buf.len() > self.len() {
+            return Err(std::io::Error::other(anyhow::anyhow!(
+                "reading {} bytes at offset {} is out of bounds for slice of len {}",
+                buf.len(),
+                offset,
+                self.len()
+            )));
+        }
+
+        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
+
+        Ok(())
+    }
+
+    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()> {
+        if offset as usize + bytes.len() > self.len() {
+            return Err(std::io::Error::other(anyhow::anyhow!(
+                "writing {} bytes at offset {} is out of bounds for slice of len {}",
+                bytes.len(),
+                offset,
+                self.len()
+            )));
+        }
+
+        self[offset as usize..][..bytes.len()].copy_from_slice(bytes);
+
+        Ok(())
+    }
+}
+
+impl SliceLike for File {
+    fn read_at_offset(&mut self, offset: u64, buf: &mut [u8]) -> std::io::Result<()> {
+        self.seek(SeekFrom::Start(offset))?;
+
+        self.read_exact(buf)?;
+
+        Ok(())
+    }
+
+    fn write_at_offset(&mut self, offset: u64, bytes: &[u8]) -> std::io::Result<()> {
+        self.seek(SeekFrom::Start(offset))?;
+
+        self.write_all(bytes)?;
+
+        Ok(())
+    }
+}
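
The extracted SliceLike trait is easy to exercise against a plain byte buffer; a short usage sketch of the &mut [u8] impl above, assuming the crate is consumed as fat_bits and relying on the new pub use re-export in lib.rs:

use fat_bits::SliceLike; // re-exported via `pub use crate::slice_like::SliceLike`

fn main() -> std::io::Result<()> {
    let mut backing = vec![0u8; 64];
    let mut slice: &mut [u8] = &mut backing;

    // write four bytes at offset 28, then read them back
    slice.write_at_offset(28, &1234u32.to_le_bytes())?;

    let mut buf = [0u8; 4];
    slice.read_at_offset(28, &mut buf)?;
    assert_eq!(u32::from_le_bytes(buf), 1234);

    Ok(())
}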

fat-bits/src/subslice.rs

@@ -5,6 +5,64 @@ use std::rc::Rc;
 
 use crate::SliceLike;
 
+pub struct SubSlice {
+    data: Rc<RefCell<dyn SliceLike>>,
+
+    offset: u64,
+    len: usize,
+}
+
+impl Debug for SubSlice {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SubSliceMut")
+            .field("offset", &self.offset)
+            .field("len", &self.len)
+            .finish()
+    }
+}
+
+impl<'a> SubSlice {
+    pub fn new(data: Rc<RefCell<dyn SliceLike>>, offset: u64, len: usize) -> SubSlice {
+        SubSlice { data, offset, len }
+    }
+
+    pub fn offset(&self) -> u64 {
+        self.offset
+    }
+
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    pub fn skip(&mut self, n: usize) -> usize {
+        let n = n.min(self.len());
+
+        self.offset += n as u64;
+        self.len -= n;
+
+        n
+    }
+}
+
+impl Read for SubSlice {
+    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+        let bytes_to_read = self.len.min(buf.len());
+
+        self.data
+            .borrow_mut()
+            .read_at_offset(self.offset, &mut buf[..bytes_to_read])?;
+
+        self.offset += bytes_to_read as u64;
+        self.len -= bytes_to_read;
+
+        Ok(bytes_to_read)
+    }
+}
+
 pub struct SubSliceMut {
     // fat_fs: &'a FatFs,
     data: Rc<RefCell<dyn SliceLike>>,
@@ -29,6 +87,10 @@ impl SubSliceMut {
 }
 
 impl<'a> SubSliceMut {
+    pub fn offset(&self) -> u64 {
+        self.offset
+    }
+
     pub fn len(&self) -> usize {
         self.len
     }
@@ -80,56 +142,3 @@ impl Write for SubSliceMut {
         Ok(())
     }
 }
-
-pub struct SubSlice {
-    data: Rc<RefCell<dyn SliceLike>>,
-
-    offset: u64,
-    len: usize,
-}
-
-impl Debug for SubSlice {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("SubSliceMut")
-            .field("offset", &self.offset)
-            .field("len", &self.len)
-            .finish()
-    }
-}
-
-impl<'a> SubSlice {
-    pub fn new(data: Rc<RefCell<dyn SliceLike>>, offset: u64, len: usize) -> SubSlice {
-        SubSlice { data, offset, len }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-    pub fn len(&self) -> usize {
-        self.len
-    }
-
-    pub fn skip(&mut self, n: usize) -> usize {
-        let n = n.min(self.len());
-
-        self.offset += n as u64;
-        self.len -= n;
-
-        n
-    }
-}
-
-impl Read for SubSlice {
-    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
-        let bytes_to_read = self.len.min(buf.len());
-
-        self.data
-            .borrow_mut()
-            .read_at_offset(self.offset, &mut buf[..bytes_to_read])?;
-
-        self.offset += bytes_to_read as u64;
-        self.len -= bytes_to_read;
-
-        Ok(bytes_to_read)
-    }
-}