This commit (591b496479, parent b87651c549) changed 3 files, with 183 additions and 27 deletions.
|
@ -77,8 +77,8 @@ def read_paramdef(f):
|
|||
(unk8,) = U("I", f.read(4))
|
||||
|
||||
desc_str = trunc(desc).decode("shift-jis", errors="replace")
|
||||
type_str = trunc(full_typename).decode()
|
||||
name_str = trunc(name).decode()
|
||||
type_str = trunc(full_typename).decode("shift-jis")
|
||||
name_str = trunc(name).decode("shift-jis")
|
||||
|
||||
length = None
|
||||
if "[" in name_str and "]" in name_str:
|
||||
|
@ -182,12 +182,25 @@ def read_param(f, paramdef_title=None):
|
|||
|
||||
|
||||
if __name__ == "__main__":
|
||||
fp1 = sys.argv[1]
|
||||
fp2 = sys.argv[2]
|
||||
fpo = sys.argv[3]
|
||||
fph = sys.argv[4]
|
||||
# ew, nasty global:
|
||||
big_endian = sys.argv[5] == "big" if len(sys.argv) > 5 else False
|
||||
if len(sys.argv) == 6:
|
||||
fp1 = sys.argv[1]
|
||||
fp2 = sys.argv[2]
|
||||
fpo = sys.argv[3]
|
||||
fph = sys.argv[4]
|
||||
# ew, nasty global:
|
||||
big_endian = sys.argv[5] == "big"
|
||||
elif len(sys.argv) == 4:
|
||||
fp1 = sys.argv[1]
|
||||
fp2 = None
|
||||
fpo = None
|
||||
fph = sys.argv[2]
|
||||
# ew, nasty global:
|
||||
big_endian = sys.argv[3] == "big"
|
||||
else:
|
||||
print("usage:")
|
||||
print(" python3 extract_params.py {paramdef in} {param in} {param out} {paramdef out} [big]")
|
||||
print(" python3 extract_params.py {paramdef in} {paramdef out} [big]")
|
||||
sys.exit(1)
|
||||
|
||||
# ew, nasty global:
|
||||
E = ">" if big_endian else "<"
|
||||
|
@ -195,27 +208,28 @@ if __name__ == "__main__":
|
|||
with open(fp1, "rb") as f:
|
||||
paramdef_title, defs = read_paramdef(f)
|
||||
|
||||
header = ["entryId"]
|
||||
for d in defs:
|
||||
name = d.name
|
||||
if ":" in name:
|
||||
name = name.split(":")[0]
|
||||
if "[" in name:
|
||||
name = name.split("[")[0]
|
||||
if d.ctype == "dummy8":
|
||||
# print("skipping", name)
|
||||
continue
|
||||
header.append(name)
|
||||
header.append("notes")
|
||||
if fp2 is not None and fph is not None:
|
||||
header = ["entryId"]
|
||||
for d in defs:
|
||||
name = d.name
|
||||
if ":" in name:
|
||||
name = name.split(":")[0]
|
||||
if "[" in name:
|
||||
name = name.split("[")[0]
|
||||
if d.ctype == "dummy8":
|
||||
# print("skipping", name)
|
||||
continue
|
||||
header.append(name)
|
||||
header.append("notes")
|
||||
|
||||
with open(fp2, "rb") as f:
|
||||
param_title, entries = read_param(f, paramdef_title)
|
||||
with open(fp2, "rb") as f:
|
||||
param_title, entries = read_param(f, paramdef_title)
|
||||
|
||||
with open(fpo, "w", newline="", encoding="utf-8") as f:
|
||||
cw = csv.writer(f, dialect="excel-tab")
|
||||
cw.writerow(header)
|
||||
for entry in entries:
|
||||
cw.writerow(entry)
|
||||
with open(fpo, "w", newline="", encoding="utf-8") as f:
|
||||
cw = csv.writer(f, dialect="excel-tab")
|
||||
cw.writerow(header)
|
||||
for entry in entries:
|
||||
cw.writerow(entry)
|
||||
|
||||
with open(fph, "w", newline="", encoding="utf-8") as f:
|
||||
cw = csv.writer(f, dialect="excel-tab")
|
||||
|
|
fmg_flatten.py — new file, 115 lines
|
@ -0,0 +1,115 @@
|
|||
from struct import unpack as U
|
||||
import csv
|
||||
import sys
|
||||
|
||||
big_endian = False
|
||||
|
||||
def readint(f):
|
||||
if big_endian:
|
||||
return U(">i", f.read(4))[0]
|
||||
else:
|
||||
return U("<i", f.read(4))[0]
|
||||
|
||||
def dumpy(f, mapping):
|
||||
f.seek(0, 2)
|
||||
fsize = f.tell()
|
||||
f.seek(0, 0)
|
||||
|
||||
something = readint(f)
|
||||
assert something == 0x10000, something
|
||||
|
||||
size = readint(f)
|
||||
assert size == fsize, size
|
||||
|
||||
unk = readint(f)
|
||||
if big_endian:
|
||||
assert unk == 0x01FF0000, unk
|
||||
else:
|
||||
assert unk == 1, unk
|
||||
|
||||
count = readint(f)
|
||||
|
||||
offset_count = readint(f)
|
||||
|
||||
somecount1 = readint(f) # still unknown!
|
||||
something = readint(f) # still unknown!
|
||||
|
||||
starts = {}
|
||||
lengths = {}
|
||||
ids = []
|
||||
cumulative_length = 0
|
||||
previous_end = None
|
||||
|
||||
for i in range(count):
|
||||
if big_endian:
|
||||
a, b, c = U(">iii", f.read(4 * 3))
|
||||
else:
|
||||
a, b, c = U("<iii", f.read(4 * 3))
|
||||
#print(f"{a:10}: {b:10} to {c:10}")
|
||||
length = c - b + 1
|
||||
|
||||
assert a not in starts
|
||||
if previous_end is not None:
|
||||
assert a == previous_end
|
||||
|
||||
starts[a] = b
|
||||
lengths[a] = length
|
||||
|
||||
for i in range(length):
|
||||
ids.append(b + i)
|
||||
|
||||
cumulative_length += length
|
||||
previous_end = a + length
|
||||
|
||||
assert offset_count == cumulative_length
|
||||
|
||||
offsets = []
|
||||
for i in range(offset_count):
|
||||
offsets.append(readint(f))
|
||||
|
||||
for id, offset in zip(ids, offsets):
|
||||
if offset == 0:
|
||||
#mapping[id] = ""
|
||||
continue
|
||||
|
||||
f.seek(offset)
|
||||
string = ""
|
||||
while True:
|
||||
char = f.read(2)
|
||||
if char == b"\0\0":
|
||||
break
|
||||
if big_endian:
|
||||
string += char.decode("utf-16be")
|
||||
else:
|
||||
string += char.decode("utf-16le")
|
||||
mapping[id] = string
|
||||
|
||||
fp = sys.argv[1]
|
||||
fpo = sys.argv[2]
|
||||
|
||||
if len(sys.argv) > 3:
|
||||
big_endian = sys.argv[3] == "big"
|
||||
|
||||
en_mapping = {}
|
||||
jp_mapping = {}
|
||||
|
||||
with open(fp, "rb") as f:
|
||||
dumpy(f, en_mapping)
|
||||
|
||||
with open(fp.replace("ENGLISH", "JAPANESE"), "rb") as f:
|
||||
dumpy(f, jp_mapping)
|
||||
|
||||
from collections import defaultdict
|
||||
mappings = defaultdict(lambda: ["", ""])
|
||||
|
||||
for k, v in en_mapping.items():
|
||||
mappings[k][0] = v
|
||||
|
||||
for k, v in jp_mapping.items():
|
||||
mappings[k][1] = v
|
||||
|
||||
with open(fpo, "w", newline="", encoding="utf-8") as f:
|
||||
cw = csv.writer(f, dialect="excel-tab")
|
||||
for k in sorted(mappings.keys()):
|
||||
en_v, jp_v = mappings[k]
|
||||
cw.writerow([k, en_v, jp_v])
|
param_notes.py — new file, 27 lines
|
@ -0,0 +1,27 @@
|
|||
from sys import argv
|
||||
from struct import unpack as U
|
||||
|
||||
big_endian = False
|
||||
if len(argv) > 2:
|
||||
big_endian = argv[2] == "big"
|
||||
|
||||
with open(argv[1], "rb") as f:
|
||||
f.seek(0xA)
|
||||
if big_endian:
|
||||
count = U(">h", f.read(2))[0]
|
||||
else:
|
||||
count = U("<h", f.read(2))[0]
|
||||
|
||||
for i in range(count):
|
||||
f.seek(0x30 + i * 3 * 4)
|
||||
|
||||
if big_endian:
|
||||
entryID, paramAddr, infoAddr = U(">iii", f.read(3 * 4))
|
||||
else:
|
||||
entryID, paramAddr, infoAddr = U("<iii", f.read(3 * 4))
|
||||
|
||||
if infoAddr not in (0, -1):
|
||||
f.seek(infoAddr)
|
||||
string = f.read()
|
||||
string = string[:string.index(b"\0")]
|
||||
print(entryID, string.decode("shift-jis", errors="replace"), sep="\t")
|
Loading…
Reference in a new issue