# Message-table dumper: parses a pair of binary UTF-16 string tables
# (ENGLISH + JAPANESE variants of the same file) and writes the strings,
# keyed by id, to a tab-separated CSV for side-by-side comparison.
import csv
import sys
from struct import unpack as U
big_endian = False
|
|
|
|
def readint(f):
|
|
if big_endian:
|
|
return U(">i", f.read(4))[0]
|
|
else:
|
|
return U("<i", f.read(4))[0]
|
|
|
|
def dumpy(f, mapping):
|
|
f.seek(0, 2)
|
|
fsize = f.tell()
|
|
f.seek(0, 0)
|
|
|
|
something = readint(f)
|
|
assert something == 0x10000, something
|
|
|
|
size = readint(f)
|
|
assert size == fsize, size
|
|
|
|
unk = readint(f)
|
|
if big_endian:
|
|
assert unk == 0x01FF0000, unk
|
|
else:
|
|
assert unk == 1, unk
|
|
|
|
count = readint(f)
|
|
|
|
offset_count = readint(f)
|
|
|
|
somecount1 = readint(f) # still unknown!
|
|
something = readint(f) # still unknown!
|
|
|
|
starts = {}
|
|
lengths = {}
|
|
ids = []
|
|
cumulative_length = 0
|
|
previous_end = None
|
|
|
|
for i in range(count):
|
|
if big_endian:
|
|
a, b, c = U(">iii", f.read(4 * 3))
|
|
else:
|
|
a, b, c = U("<iii", f.read(4 * 3))
|
|
#print(f"{a:10}: {b:10} to {c:10}")
|
|
length = c - b + 1
|
|
|
|
assert a not in starts
|
|
if previous_end is not None:
|
|
assert a == previous_end
|
|
|
|
starts[a] = b
|
|
lengths[a] = length
|
|
|
|
for i in range(length):
|
|
ids.append(b + i)
|
|
|
|
cumulative_length += length
|
|
previous_end = a + length
|
|
|
|
assert offset_count == cumulative_length
|
|
|
|
offsets = []
|
|
for i in range(offset_count):
|
|
offsets.append(readint(f))
|
|
|
|
for id, offset in zip(ids, offsets):
|
|
if offset == 0:
|
|
#mapping[id] = ""
|
|
continue
|
|
|
|
f.seek(offset)
|
|
string = ""
|
|
while True:
|
|
char = f.read(2)
|
|
if char == b"\0\0":
|
|
break
|
|
if big_endian:
|
|
string += char.decode("utf-16be")
|
|
else:
|
|
string += char.decode("utf-16le")
|
|
mapping[id] = string
|
|
|
|
fp = sys.argv[1]
|
|
fpo = sys.argv[2]
|
|
|
|
if len(sys.argv) > 3:
|
|
big_endian = sys.argv[3] == "big"
|
|
|
|
en_mapping = {}
|
|
jp_mapping = {}
|
|
|
|
with open(fp, "rb") as f:
|
|
dumpy(f, en_mapping)
|
|
|
|
with open(fp.replace("ENGLISH", "JAPANESE"), "rb") as f:
|
|
dumpy(f, jp_mapping)
|
|
|
|
from collections import defaultdict
|
|
mappings = defaultdict(lambda: ["", ""])
|
|
|
|
for k, v in en_mapping.items():
|
|
mappings[k][0] = v
|
|
|
|
for k, v in jp_mapping.items():
|
|
mappings[k][1] = v
|
|
|
|
with open(fpo, "w", newline="", encoding="utf-8") as f:
|
|
cw = csv.writer(f, dialect="excel-tab")
|
|
for k in sorted(mappings.keys()):
|
|
en_v, jp_v = mappings[k]
|
|
cw.writerow([k, en_v, jp_v])
|