commit b87651c549
1 changed file with 224 additions and 0 deletions

extract_params.py (new file)

@@ -0,0 +1,224 @@
#!/usr/bin/env python3
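"""Dump a binary param file to TSV using its companion paramdef.

Summary inferred from the code below: the paramdef describes the field
layout, the param file holds the rows, and the script writes one TSV of
rows and one TSV of the field definitions themselves.
"""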
import struct
import csv
import sys

from collections import namedtuple

Def = namedtuple("Def", "name desc notes typedef ctype pytype length bits default min max step".split(" "))
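
# Map paramdef type names to struct format characters.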
typemap = dict(
    s8="b",
    u8="B",
    s16="h",
    u16="H",
    s32="i",
    u32="I",
    s64="q",
    u64="Q",
    f32="f",
    f64="d",

    dummy8="B",  # note the array length typically alongside them.
)
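
# U() prepends the endianness marker E ("<" or ">") to every struct format;
# E is a module-level global set in the __main__ block below.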
def U(fmt, *args, **kwargs):
    return struct.unpack(E + fmt, *args, **kwargs)


def trunc(s):
    if b"\0" in s:
        return s[:s.find(b"\0")]
    return s


def readcstr(f, offset=None):
    if offset is not None:
        here = f.tell()
        f.seek(offset)

    raw = b""
    while True:
        buf = f.read(16)
        if len(buf) == 0:
            break

        if b"\0" in buf:
            raw += buf[:buf.find(b"\0")]
            break
        else:
            raw += buf

    if offset is not None:
        f.seek(here)

    return raw
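

# Paramdef layout, as inferred from the reads below (not an official spec):
# a 48-byte header (u32 filesize, four u16s including the field count,
# a 32-byte title, two more u16s), then one 176-byte record per field.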
def read_paramdef(f):
    defs = []

    filesize, unk1, unk2, count, unk3 = U("IHHHH", f.read(12))
    paramdef_title = f.read(32)
    unk4, unk5 = U("HH", f.read(4))

    for i in range(count):
        # TODO: rename a lot of the variables here.

        desc = f.read(64)
        typename = f.read(8)
        printformat = f.read(8)
        default, min_, max_, step = U("ffff", f.read(16))
        unk6, unk7, notes_offset = U("IIi", f.read(12))
        full_typename = f.read(32)
        name = f.read(32)
        # ID? it seems to increase by 100 sometimes.
        (unk8,) = U("I", f.read(4))

        desc_str = trunc(desc).decode("shift-jis", errors="replace")
        type_str = trunc(full_typename).decode()
        name_str = trunc(name).decode()

        length = None
        if "[" in name_str and "]" in name_str:
            length = int(name_str.split("[")[1].split("]")[0])

        bits = None
        if ":" in name_str:
            bits = int(name_str.split(":")[1])

        if type_str in typemap:
            type_ = typemap[type_str]
        else:
            underlying_type = trunc(typename).decode()
            type_ = typemap[underlying_type]

        if notes_offset in (0, -1):
            notes_str = ""
        else:
            notes = readcstr(f, notes_offset)
            notes_str = notes.decode("shift-jis", errors="replace")

        d = Def(name_str, desc_str, notes_str,
                type_str, trunc(typename).decode(), type_, length, bits,
                default, min_, max_, step)
        defs.append(d)

    return paramdef_title, defs
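

# Param layout, also inferred from the reads below: a 48-byte header with a
# 32-byte title and the entry count, then `count` index records of
# (i32 entry_id, i32 param_offset, i32 notes_offset), each pointing at a
# packed row that is decoded field by field against the paramdef.
# Note: read_param relies on the module-level `defs` global set in __main__.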
def read_param(f, paramdef_title=None):
    entries = []

    filesize, unk1, unk2, unk3, count = U("IHHHH", f.read(12))
    param_title = f.read(32)
    if paramdef_title is not None:
        if trunc(paramdef_title) != trunc(param_title):
            raise Exception(
                "that's the wrong paramdef for this param file!" +
                f"\nexpected: {paramdef_title}\nretrieved: {param_title}")

    unk4, unk5 = U("HH", f.read(4))
    here = f.tell()

    for i in range(count):
        f.seek(here)
        entry_id, param_offset, notes_offset = U("iii", f.read(12))
        here = f.tell()
        f.seek(param_offset)

        entry = [entry_id]
        prev_type = None
        for d in defs:
            is_simple = d.length is None and d.bits is None
            if d.pytype != prev_type or d.bits is None:
                buf, bufbits = 0, 0

            # print(f"{d.pytype:24} {f.tell():X}")

            size = struct.calcsize(d.pytype)
            if is_simple:
                (datum,) = U(d.pytype, f.read(size))

            elif d.length is not None:
                # this only seems to be used for padding, so we can skip it.
                assert d.ctype == "dummy8"  # let's assert that though.
                datum = f.read(d.length * size)
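
            # Bit fields: consecutive "name:bits" fields share one integer of
            # the underlying type. Refill the accumulator when it runs out of
            # bits, then peel d.bits off the low end (high end for big-endian
            # input).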
            elif d.bits is not None:
                if bufbits == 0 or bufbits < d.bits:
                    assert d.pytype not in ("f", "d")
                    (buf,) = U(d.pytype.upper(), f.read(size))
                    bufbits = size * 8

                mask = (1 << d.bits) - 1
                if big_endian:
                    datum = (buf >> (size * 8 - d.bits)) & mask
                    buf <<= d.bits
                else:
                    datum = buf & mask
                    buf >>= d.bits
                bufbits -= d.bits

            else:
                raise Exception("unhandled definition: " + d.name)

            if d.ctype != "dummy8":
                entry.append(datum)

            prev_type = d.pytype

        if notes_offset in (0, -1):
            notes_str = ""
        else:
            notes = readcstr(f, notes_offset)
            notes_str = notes.decode("shift-jis", errors="replace")
        entry.append(notes_str)

        entries.append(entry)

    return param_title, entries
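

# Command-line arguments: paramdef path, param path, output TSV for the rows,
# output TSV for the field definitions, and optionally "big" for big-endian
# input. Example invocation (file names here are made up):
#   python3 extract_params.py foo.paramdef foo.param foo.tsv foo_defs.tsv big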
if __name__ == "__main__":
    fp1 = sys.argv[1]
    fp2 = sys.argv[2]
    fpo = sys.argv[3]
    fph = sys.argv[4]
    # ew, nasty global:
    big_endian = sys.argv[5] == "big" if len(sys.argv) > 5 else False

    # ew, nasty global:
    E = ">" if big_endian else "<"

    with open(fp1, "rb") as f:
        paramdef_title, defs = read_paramdef(f)

    header = ["entryId"]
    for d in defs:
        name = d.name
        if ":" in name:
            name = name.split(":")[0]
        if "[" in name:
            name = name.split("[")[0]
        if d.ctype == "dummy8":
            # print("skipping", name)
            continue
        header.append(name)
    header.append("notes")

    with open(fp2, "rb") as f:
        param_title, entries = read_param(f, paramdef_title)

    with open(fpo, "w", newline="", encoding="utf-8") as f:
        cw = csv.writer(f, dialect="excel-tab")
        cw.writerow(header)
        for entry in entries:
            cw.writerow(entry)

    with open(fph, "w", newline="", encoding="utf-8") as f:
        cw = csv.writer(f, dialect="excel-tab")
        cw.writerow(Def._fields)
        for d in defs:
            cw.writerow(d)