Add ability to split a tablerized image and encode a group of images into an sff file
This commit is contained in:
commit
d7c2435fdd
6 changed files with 1520 additions and 0 deletions
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
/target
|
||||
/font
|
||||
/res
|
||||
1076
Cargo.lock
generated
Normal file
1076
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
7
Cargo.toml
Normal file
7
Cargo.toml
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
[package]
|
||||
name = "karlos-helper"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
image = "0.25.6"
|
||||
42
src/image_wrapper.rs
Normal file
42
src/image_wrapper.rs
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
use std::{
|
||||
fs::OpenOptions,
|
||||
io::{BufWriter, Write},
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use image::{DynamicImage, GenericImage, ImageReader};
|
||||
|
||||
/// An 8-bit grayscale ("black & white") image stored as a flat byte buffer.
#[derive(Debug, Clone)]
pub struct BWImage {
    /// Image width in pixels.
    pub width: u32,
    /// Image height in pixels.
    pub height: u32,
    /// One brightness byte (0-255) per pixel, `width * height` entries,
    /// as filled in by `load_bw_image`.
    pub data: Vec<u8>,
}
|
||||
|
||||
pub fn load_bw_image<P: AsRef<Path>>(p: P) -> BWImage {
|
||||
let img = ImageReader::open(p).unwrap().decode().unwrap();
|
||||
match img {
|
||||
DynamicImage::ImageRgba8(buf) => BWImage {
|
||||
width: buf.width(),
|
||||
height: buf.height(),
|
||||
data: buf
|
||||
.pixels()
|
||||
.map(|px| ((px.0[0] as usize + px.0[1] as usize + px.0[2] as usize) / 3) as u8)
|
||||
.collect(),
|
||||
},
|
||||
DynamicImage::ImageRgb8(buf) => BWImage {
|
||||
width: buf.width(),
|
||||
height: buf.height(),
|
||||
data: buf
|
||||
.pixels()
|
||||
.map(|px| ((px.0[0] as usize + px.0[1] as usize + px.0[2] as usize) / 3) as u8)
|
||||
.collect(),
|
||||
},
|
||||
DynamicImage::ImageLuma8(buf) => BWImage {
|
||||
width: buf.width(),
|
||||
height: buf.height(),
|
||||
data: buf.into_raw(),
|
||||
},
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
144
src/main.rs
Normal file
144
src/main.rs
Normal file
|
|
@ -0,0 +1,144 @@
|
|||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
mod image_wrapper;
|
||||
use image_wrapper::BWImage;
|
||||
use sff::FileDescOut;
|
||||
mod sff;
|
||||
|
||||
/// Iterates over a directory which shall contain images named by their number in the font.
|
||||
/// Asserts that all images have the same dimensions.
|
||||
fn load_font_dir<P: AsRef<Path>>(dir_with_images: P) -> HashMap<u32, BWImage> {
|
||||
let mut map = HashMap::new();
|
||||
let (mut found_width, mut found_height) = (None, None);
|
||||
for elem in std::fs::read_dir(dir_with_images).unwrap() {
|
||||
let elem = elem.unwrap();
|
||||
let img = image_wrapper::load_bw_image(elem.path());
|
||||
match (found_width, found_height) {
|
||||
(None, None) => {
|
||||
found_width = Some(img.width);
|
||||
found_height = Some(img.height);
|
||||
}
|
||||
(Some(fw), Some(fh)) => {
|
||||
assert!(img.width == fw);
|
||||
assert!(img.height == fh);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
let number: u32 = elem
|
||||
.path()
|
||||
.file_stem()
|
||||
.unwrap()
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.parse::<u32>()
|
||||
.unwrap();
|
||||
assert!(!map.contains_key(&number));
|
||||
map.insert(number, img);
|
||||
}
|
||||
map
|
||||
}
|
||||
|
||||
fn image_table_split<P, Q>(
|
||||
src_image: P,
|
||||
dst_dir: Q,
|
||||
start_number: usize,
|
||||
nrows: usize,
|
||||
ncols: usize,
|
||||
width_per_elem: usize,
|
||||
height_per_elem: usize,
|
||||
) where
|
||||
P: AsRef<Path>,
|
||||
Q: AsRef<Path>,
|
||||
{
|
||||
let src_img = image::ImageReader::open(src_image)
|
||||
.unwrap()
|
||||
.decode()
|
||||
.unwrap();
|
||||
assert!(src_img.width() as usize == ncols * width_per_elem);
|
||||
assert!(src_img.height() as usize == nrows * height_per_elem);
|
||||
let mut number_count = start_number;
|
||||
for row in 0..nrows {
|
||||
for col in 0..ncols {
|
||||
let img = src_img.crop_imm(
|
||||
(col * width_per_elem) as u32,
|
||||
(row * height_per_elem) as u32,
|
||||
width_per_elem as u32,
|
||||
height_per_elem as u32,
|
||||
);
|
||||
let file_name = format!("{}.png", number_count);
|
||||
img.save(dst_dir.as_ref().join(file_name)).unwrap();
|
||||
number_count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A contiguous run of codepoints: `start`, `start + 1`, ..., `start + len - 1`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
struct TableRange {
    start: u32,
    len: u32,
}

/// Collapses a sorted slice of codepoint numbers into maximal contiguous
/// ranges, e.g. `[3, 4, 5, 6, 8]` -> `[{3, 4}, {8, 1}]`.
///
/// Returns an empty `Vec` for an empty slice (previously this indexed
/// element 0 unconditionally and panicked on empty input).
fn sorted_numbers_into_ranges(sorted_numbers: &[u32]) -> Vec<TableRange> {
    let mut out = Vec::new();
    if sorted_numbers.is_empty() {
        return out;
    }
    let mut curr = TableRange {
        start: sorted_numbers[0],
        len: 1,
    };
    for &n in &sorted_numbers[1..] {
        if curr.start + curr.len == n {
            // `n` extends the current run by one.
            curr.len += 1;
        } else {
            // Gap found: close the current run and start a new one at `n`.
            out.push(curr);
            curr = TableRange { start: n, len: 1 };
        }
    }
    // The final run is still open; emit it.
    out.push(curr);
    out
}
|
||||
|
||||
fn main() {
|
||||
image_table_split("res/test.png", "font", 32, 6, 16, 8, 18);
|
||||
let map = load_font_dir("font");
|
||||
let mut sorted_keys: Vec<u32> = map.keys().copied().collect();
|
||||
sorted_keys.sort();
|
||||
let ranges = sorted_numbers_into_ranges(&sorted_keys);
|
||||
|
||||
let fdo = sff::FileDescOut {
|
||||
char_width: 8,
|
||||
char_height: 18,
|
||||
encoding: sff::Encoding::BitPerPix4,
|
||||
tables: ranges
|
||||
.into_iter()
|
||||
.map(|rng| sff::TableOut {
|
||||
first_codepoint: rng.start,
|
||||
num_codepoints: rng.len,
|
||||
// TODO shitty clone
|
||||
data: (rng.start..rng.start + rng.len)
|
||||
.map(|codepoint| map.get(&codepoint).unwrap().data.clone())
|
||||
.collect(),
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
|
||||
std::fs::write("font.sff", fdo.encode()).unwrap();
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    /// The sorted input [3, 4, 5, 6, 8] must collapse into the two runs
    /// 3..=6 and the singleton 8.
    #[test]
    fn test_ranger() {
        let input = [3, 4, 5, 6, 8];
        let expected = [
            TableRange { start: 3, len: 4 },
            TableRange { start: 8, len: 1 },
        ];
        assert_eq!(sorted_numbers_into_ranges(&input), expected);
    }
}
|
||||
248
src/sff.rs
Normal file
248
src/sff.rs
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
/// Header entry for one contiguous run of glyphs inside an sff file.
#[derive(Debug)]
struct Table {
    /// Codepoint of the first glyph covered by this table.
    first_codepoint: u32,
    /// Number of consecutive codepoints in this table.
    num_codepoints: u32,
    /// Byte offset of this table's glyph payload within the whole file buffer.
    offset: u32,
}
|
||||
|
||||
/// Pixel bit depth used in the sff glyph payload.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Encoding {
    /// 1 bit per pixel, 8 pixels per byte, MSB first.
    BitPerPix1,
    /// 2 bits per pixel, 4 pixels per byte, MSB first.
    BitPerPix2,
    /// 4 bits per pixel, 2 pixels per byte, high nibble first.
    BitPerPix4,
    /// 8 bits per pixel, one byte per pixel.
    BitPerPix8,
}

/// Describes how to extract one pixel from packed payload bytes:
/// `(bytes[byte_index] >> shift) & mask`.
struct Access {
    byte_index: usize,
    shift: u8,
    mask: u8,
}

impl Encoding {
    /// Numeric code stored in the sff header for this encoding.
    fn into_code(self) -> u16 {
        match self {
            Encoding::BitPerPix1 => 0,
            Encoding::BitPerPix2 => 1,
            Encoding::BitPerPix4 => 2,
            Encoding::BitPerPix8 => 3,
        }
    }

    /// Maps a linear pixel index to the byte index, shift, and mask needed
    /// to read that pixel. Pixels are packed MSB-first within each byte.
    fn access_from_index(self, px_index: usize) -> Access {
        match self {
            Encoding::BitPerPix1 => Access {
                byte_index: px_index / 8,
                shift: (7 - (px_index % 8)) as u8,
                mask: 0x01,
            },
            Encoding::BitPerPix2 => Access {
                byte_index: px_index / 4,
                shift: ((3 - (px_index % 4)) << 1) as u8,
                mask: 0x03,
            },
            Encoding::BitPerPix4 => Access {
                byte_index: px_index / 2,
                shift: ((1 - (px_index % 2)) << 2) as u8,
                mask: 0x0f,
            },
            Encoding::BitPerPix8 => Access {
                byte_index: px_index,
                shift: 0,
                mask: 0xff,
            },
        }
    }

    /// Compresses standard brightness data (0-255) into a suitable form for sff.
    ///
    /// Each pixel keeps only its top bits; pixels are packed MSB-first,
    /// matching `access_from_index`. A trailing partial byte is zero-padded.
    /// (Previously the 1/2-bpp paths were `todo!()` and the 4-bpp path
    /// asserted an even input length; now all four encodings are implemented
    /// and any length is accepted.)
    fn compress(self, input: &[u8]) -> Vec<u8> {
        let mut out = Vec::new();
        match self {
            Encoding::BitPerPix1 => {
                for group in input.chunks(8) {
                    let mut byte = 0u8;
                    for (i, px) in group.iter().enumerate() {
                        // Keep the pixel's top bit, place it MSB-first.
                        byte |= (px >> 7) << (7 - i);
                    }
                    out.push(byte);
                }
            }
            Encoding::BitPerPix2 => {
                for group in input.chunks(4) {
                    let mut byte = 0u8;
                    for (i, px) in group.iter().enumerate() {
                        // Keep the pixel's top two bits, MSB-first.
                        byte |= (px >> 6) << ((3 - i) << 1);
                    }
                    out.push(byte);
                }
            }
            Encoding::BitPerPix4 => {
                for pair in input.chunks(2) {
                    let hi = pair[0] >> 4;
                    // Missing second pixel in a trailing odd chunk packs as 0.
                    let lo = pair.get(1).map_or(0, |px| px >> 4);
                    out.push((hi << 4) | lo);
                }
            }
            // 8 bpp is already one byte per pixel; copy through unchanged.
            Encoding::BitPerPix8 => out.extend_from_slice(input),
        }
        out
    }
}
|
||||
|
||||
/// Reads a little-endian `u16` from `input` at `byte_offset`.
/// The offset must be 2-byte aligned.
fn get_u16(input: &[u8], byte_offset: usize) -> u16 {
    assert!(byte_offset % 2 == 0);
    let lo = input[byte_offset] as u16;
    let hi = input[byte_offset + 1] as u16;
    (hi << 8) | lo
}
|
||||
|
||||
/// Reads a little-endian `u32` from `input` at `byte_offset`.
/// The offset must be 4-byte aligned.
fn get_u32(input: &[u8], byte_offset: usize) -> u32 {
    assert!(byte_offset % 4 == 0);
    // Fold the four bytes most-significant first; equivalent to
    // shifting each byte into its little-endian position.
    let mut value = 0u32;
    for i in (0..4).rev() {
        value = (value << 8) | input[byte_offset + i] as u32;
    }
    value
}
|
||||
|
||||
/// Decoded sff header information; borrows the raw file bytes so glyph
/// payloads can be sliced out of `input` on demand.
#[derive(Debug)]
struct FileDesc<'a> {
    /// The complete sff file contents; table `offset`s index into this.
    input: &'a [u8],
    /// Glyph width in pixels.
    char_width: usize,
    /// Glyph height in pixels.
    char_height: usize,
    /// Pixel bit depth of the glyph payload.
    encoding: Encoding,
    /// Table headers parsed from the file.
    tables: Vec<Table>,
}
|
||||
|
||||
impl<'a> FileDesc<'a> {
|
||||
fn bytes_per_codepoint(&self) -> usize {
|
||||
let px_per_codepoint = self.char_width * self.char_height;
|
||||
match self.encoding {
|
||||
Encoding::BitPerPix1 => px_per_codepoint >> 3,
|
||||
Encoding::BitPerPix2 => px_per_codepoint >> 2,
|
||||
Encoding::BitPerPix4 => px_per_codepoint >> 1,
|
||||
Encoding::BitPerPix8 => px_per_codepoint,
|
||||
}
|
||||
}
|
||||
|
||||
fn find_codepoint(&self, codepoint: u32) -> Option<&[u8]> {
|
||||
self.tables
|
||||
.iter()
|
||||
.find(|tab| {
|
||||
tab.first_codepoint <= codepoint
|
||||
&& tab.first_codepoint + tab.num_codepoints > codepoint
|
||||
})
|
||||
.map(|tab| {
|
||||
let off = tab.offset as usize
|
||||
+ (codepoint - tab.first_codepoint) as usize * self.bytes_per_codepoint();
|
||||
&self.input[off..off + self.bytes_per_codepoint()]
|
||||
})
|
||||
}
|
||||
|
||||
// input should be result of find_codepoint here
|
||||
fn get_pixel_value(&self, input: &[u8], row: u8, col: u8) -> Option<u8> {
|
||||
if row as usize >= self.char_height || col as usize >= self.char_width {
|
||||
return None;
|
||||
}
|
||||
let px_index = row as usize * self.char_width + col as usize;
|
||||
let Access {
|
||||
byte_index,
|
||||
shift,
|
||||
mask,
|
||||
} = self.encoding.access_from_index(px_index);
|
||||
Some((input[byte_index] >> shift) & mask)
|
||||
}
|
||||
}
|
||||
|
||||
fn decode(input: &[u8]) -> FileDesc<'_> {
|
||||
if input[0] != b's' || input[1] != b'f' || input[2] != b'f' || input[3] != b'\0' {
|
||||
panic!("invalid header magic");
|
||||
}
|
||||
if get_u32(input, 4) != 1 {
|
||||
panic!("invalid version");
|
||||
}
|
||||
let char_width = get_u16(input, 0x8) as usize;
|
||||
let char_height = get_u16(input, 0xa) as usize;
|
||||
let encoding = match get_u16(input, 0xc) {
|
||||
0 => Encoding::BitPerPix1,
|
||||
1 => Encoding::BitPerPix2,
|
||||
2 => Encoding::BitPerPix4,
|
||||
3 => Encoding::BitPerPix8,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let num_tables = get_u16(input, 0xe);
|
||||
let mut tables = Vec::new();
|
||||
let mut coff = 0x10;
|
||||
for _ in 0..num_tables {
|
||||
let tab = Table {
|
||||
first_codepoint: get_u32(input, coff),
|
||||
num_codepoints: get_u32(input, coff + 4),
|
||||
offset: get_u32(input, coff + 8),
|
||||
};
|
||||
tables.push(tab);
|
||||
coff += 3 * 4;
|
||||
}
|
||||
|
||||
FileDesc {
|
||||
input,
|
||||
char_width,
|
||||
char_height,
|
||||
encoding,
|
||||
tables,
|
||||
}
|
||||
}
|
||||
|
||||
/// One contiguous codepoint range to be written out.
///
/// data holds unencoded values, from 0 to 255. They are scaled down/rounded internally.
#[derive(Debug)]
pub struct TableOut {
    /// First codepoint covered by this table.
    pub first_codepoint: u32,
    /// Number of consecutive codepoints; `data` should hold this many glyphs.
    pub num_codepoints: u32,
    /// One buffer of raw 0-255 brightness bytes per codepoint.
    pub data: Vec<Vec<u8>>,
}
|
||||
|
||||
/// In-memory description of an sff font file to be serialized.
#[derive(Debug)]
pub struct FileDescOut {
    /// Glyph width in pixels (must fit in a u16 when encoding).
    pub char_width: usize,
    /// Glyph height in pixels (must fit in a u16 when encoding).
    pub char_height: usize,
    /// Bit depth used when packing glyph pixels.
    pub encoding: Encoding,
    /// One entry per contiguous codepoint range.
    pub tables: Vec<TableOut>,
}
|
||||
|
||||
/// Appends `value` to `v` in little-endian byte order.
fn push_u16(v: &mut Vec<u8>, value: u16) {
    v.extend_from_slice(&value.to_le_bytes());
}
|
||||
|
||||
/// Appends `value` to `v` in little-endian byte order.
fn push_u32(v: &mut Vec<u8>, value: u32) {
    v.extend_from_slice(&value.to_le_bytes());
}
|
||||
|
||||
/// Writes `value` little-endian into `output` at `byte_offset`.
/// The offset must be 2-byte aligned.
fn put_u16(output: &mut [u8], byte_offset: usize, value: u16) {
    assert!(byte_offset % 2 == 0);
    output[byte_offset..byte_offset + 2].copy_from_slice(&value.to_le_bytes());
}
|
||||
|
||||
/// Writes `value` little-endian into `output` at `byte_offset`.
/// The offset must be 4-byte aligned.
fn put_u32(output: &mut [u8], byte_offset: usize, value: u32) {
    assert!(byte_offset % 4 == 0);
    output[byte_offset..byte_offset + 4].copy_from_slice(&value.to_le_bytes());
}
|
||||
|
||||
impl FileDescOut {
|
||||
pub fn encode(&self) -> Vec<u8> {
|
||||
let mut v = vec![b's', b'f', b'f', b'\0'];
|
||||
push_u32(&mut v, 1);
|
||||
assert!(self.char_width < 1 << 16);
|
||||
assert!(self.char_height < 1 << 16);
|
||||
push_u16(&mut v, self.char_width as u16);
|
||||
push_u16(&mut v, self.char_height as u16);
|
||||
push_u16(&mut v, self.encoding.into_code());
|
||||
push_u16(&mut v, self.tables.len().try_into().unwrap());
|
||||
for _ in 0..self.tables.len() {
|
||||
// first, reserve space for the table headers
|
||||
push_u32(&mut v, 0);
|
||||
push_u32(&mut v, 0);
|
||||
push_u32(&mut v, 0);
|
||||
}
|
||||
for (i, table) in self.tables.iter().enumerate() {
|
||||
let header_off = 0x10 + i * 3 * 4;
|
||||
put_u32(&mut v[..], header_off, table.first_codepoint);
|
||||
put_u32(&mut v[..], header_off + 4, table.num_codepoints);
|
||||
let curr_len = v.len() as u32;
|
||||
put_u32(&mut v[..], header_off + 8, curr_len);
|
||||
for glyph_data in table.data.iter() {
|
||||
// TODO ugly clone
|
||||
v.extend_from_slice(&self.encoding.compress(glyph_data)[..]);
|
||||
}
|
||||
}
|
||||
v
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue