You've already forked PythonLib
mirror of
https://github.com/lifebottle/PythonLib.git
synced 2026-02-13 15:25:50 -08:00
482 lines
18 KiB
Python
482 lines
18 KiB
Python
from ToolsTales import ToolsTales
|
|
import subprocess
|
|
from dicttoxml import dicttoxml
|
|
import json
|
|
import struct
|
|
import shutil
|
|
import os
|
|
import re
|
|
import io
|
|
import pandas as pd
|
|
import xml.etree.ElementTree as ET
|
|
import lxml.etree as etree
|
|
from xml.dom import minidom
|
|
from pathlib import Path
|
|
class ToolsTOPX(ToolsTales):
    """Extraction/insertion tooling for Tales of the World: Narikiri Dungeon X (PSP)."""

    def __init__(self, tbl):
        """Load the character table, tag tables and file-hash table, then create
        the working directory tree.

        :param tbl: name of the JSON character-table file inside <repo>/Data/Misc/.
        """
        # NOTE(review): super().__init__ presumably sets self.repo_name and
        # self.tblFile (used immediately below) -- confirm in ToolsTales.
        super().__init__("NDX", tbl, "Narikiri-Dungeon-X")

        with open("../{}/Data/Misc/{}".format(self.repo_name, self.tblFile), encoding="utf-8") as f:
            jsonRaw = json.load(f)

        # Every sub-table except TBL and NAME is keyed by hexadecimal strings;
        # convert those keys to ints so byte values can be looked up directly.
        self.jsonTblTags = {
            k1: {int(k2, 16) if k1 not in ("TBL", "NAME") else k2: v2
                 for k2, v2 in jsonRaw[k1].items()}
            for k1 in jsonRaw
        }

        # Inverted lookups: text -> big-endian byte pair / tag id.
        self.itable = {v: struct.pack(">H", int(k)) for k, v in self.jsonTblTags['TBL'].items()}
        self.itags = {v: k for k, v in self.jsonTblTags['TAGS'].items()}

        # Always define the optional inverted tables so later attribute access
        # cannot raise AttributeError when the JSON lacks NAME/COLOR sections.
        self.inames = {}
        self.icolors = {}
        if "NAME" in self.jsonTblTags:
            self.inames = {v: k for k, v in self.jsonTblTags['NAME'].items()}
        if "COLOR" in self.jsonTblTags:
            self.icolors = {v: k for k, v in self.jsonTblTags['COLOR'].items()}

        # Running ids used while generating XML entries.
        self.id = 1
        self.struct_id = 1

        # Load the hash table for the files (hash -> readable file name).
        with open('../Data/Narikiri-Dungeon-X/Misc/hashes.json', 'r') as json_file:
            self.hashes = json.load(json_file)

        self.repo_name = 'Narikiri-Dungeon-X'
        self.misc = '../Data/{}/Misc'.format(self.repo_name)
        self.disc_path = '../Data/{}/Disc'.format(self.repo_name)
        self.story_XML_extract = '../Data/{}/Story/'.format(self.repo_name)  # Files are the result of PAKCOMPOSER + Comptoe here
        self.story_XML_new = '../{}/Data/NDX/Story/XML'.format(self.repo_name)
        self.skit_extract = '../Data/{}/Skit/'.format(self.repo_name)  # Files are the result of PAKCOMPOSER + Comptoe here

        self.all_extract = '../Data/{}/All/'.format(self.repo_name)
        self.all_original = '../Data/{}/Disc/Original/PSP_GAME/USRDIR/all.dat'.format(self.repo_name)
        self.all_new = '../Data/{}/Disc/New/PSP_GAME/USRDIR/all.dat'.format(self.repo_name)  # File is all.dat

        # Byte signatures that mark struct / plain-string pointers inside a TSS file.
        self.story_struct_byte_code = b'\x18\x00\x0C\x04'
        self.story_string_byte_code = b'\x00\x00\x82\x02'

        self.make_dirs()
|
#############################
|
|
#
|
|
# Extraction of files and unpacking
|
|
#
|
|
#############################
|
|
|
|
# Make the basic directories for extracting all.dat
|
|
def make_dirs(self):
|
|
self.mkdir('../Data/{}/All'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/character'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/charsnd'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/data'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/effect'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/event'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/gui'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/map'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/resident'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/battle/tutorial'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/chat'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/gim'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/map'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/map/data'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/map/pack'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/movie'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/snd'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/snd/init'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/snd/se3'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/snd/se3/map_mus'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/snd/strpck'.format(self.repo_name))
|
|
self.mkdir('../Data/{}/All/sysdata'.format(self.repo_name))
|
|
|
|
# Extract each of the file from the all.dat
|
|
def extract_files(self, start, size, filename):
|
|
if filename in self.hashes.keys():
|
|
filename = self.hashes[filename]
|
|
input_file = open( self.all_original, 'rb')
|
|
input_file.seek(start, 0)
|
|
data = input_file.read(size)
|
|
output_file = open( os.path.join(self.all_extract, filename), 'wb')
|
|
output_file.write(data)
|
|
output_file.close()
|
|
input_file.close()
|
|
|
|
|
|
# Extract the story files
|
|
def extract_All_Story(self):
|
|
|
|
print("Extracting Story")
|
|
path = os.path.join( self.all_extract, 'map/pack/')
|
|
self.mkdir(self.story_XML_extract)
|
|
|
|
for f in os.listdir( path ):
|
|
if os.path.isfile( path+f) and '.cab' in f:
|
|
|
|
|
|
file_name = self.story_XML_extract+'New/'+f.replace(".cab", ".pak3")
|
|
self.extract_Story_File(path+f, file_name)
|
|
|
|
|
|
|
|
|
|
|
|
#super().pakComposerAndComptoe(fileName, "-d", "-3")
|
|
|
|
# Extract one single CAB file to the XML format
|
|
def extract_Story_File(self,original_cab_file, file_name):
|
|
|
|
#1) Extract CAB file to the PAK3 format
|
|
#subprocess.run(['expand', original_cab_file, file_name])
|
|
|
|
#2) Decompress PAK3 to a folder
|
|
#self.pakcomposer("-d", file_name, os.path.join( self.story_XML_extract, "New"))
|
|
|
|
if os.path.isdir(file_name.replace(".pak3", "")):
|
|
|
|
#3) Grab TSS file from PAK3 folder
|
|
tss = self.get_tss_from_pak3( file_name.replace(".pak3", ""))
|
|
|
|
#4) Extract TSS to XML
|
|
self.extract_tss_XML(tss, original_cab_file)
|
|
|
|
def get_tss_from_pak3(self, pak3_folder):
|
|
|
|
|
|
if os.path.isdir(pak3_folder):
|
|
folder_name = os.path.basename(pak3_folder)
|
|
|
|
file_list = [os.path.dirname(pak3_folder) + "/" + folder_name + "/" + ele for ele in os.listdir(pak3_folder)]
|
|
|
|
for file in file_list:
|
|
with open(file, "rb") as f:
|
|
data = f.read()
|
|
|
|
if data[0:3] == b'TSS':
|
|
print("... Extract TSS for file {} of size: {}".format(folder_name, len(data)))
|
|
return io.BytesIO(data)
|
|
|
|
def extract_tss_XML(self, tss, cab_file_name):
|
|
|
|
root = etree.Element('SceneText')
|
|
|
|
|
|
tss.read(12)
|
|
strings_offset = struct.unpack('<I', tss.read(4))[0]
|
|
print(hex(strings_offset))
|
|
|
|
tss.read(4)
|
|
pointer_block_size = struct.unpack('<I', tss.read(4))[0]
|
|
print(hex(pointer_block_size))
|
|
block_size = struct.unpack('<I', tss.read(4))[0]
|
|
#print(pointer_block_size)
|
|
#print(block_size)
|
|
|
|
|
|
#Create all the Nodes for Struct and grab the Person offset
|
|
strings_node = etree.SubElement(root, 'Strings')
|
|
etree.SubElement(strings_node, 'Section').text = "Story"
|
|
texts_offset, pointers_offset = self.extract_Story_Pointers(tss, self.story_struct_byte_code, strings_offset, pointer_block_size)
|
|
person_offset = [ self.extract_From_Struct(tss, strings_offset, pointer_offset, struct_offset, strings_node) for pointer_offset, struct_offset in zip(pointers_offset, texts_offset)]
|
|
|
|
|
|
#Create all the Nodes for Strings and grab the minimum offset
|
|
strings_node = etree.SubElement(root, 'Strings')
|
|
etree.SubElement(strings_node, 'Section').text = "Other Strings"
|
|
tss.seek(16)
|
|
texts_offset, pointers_offset = self.extract_Story_Pointers(tss, self.story_string_byte_code, strings_offset, pointer_block_size)
|
|
[ self.extract_From_String(tss, strings_offset, pointer_offset, text_offset, strings_node) for pointer_offset, text_offset in zip(pointers_offset, texts_offset)]
|
|
|
|
text_start = min( min(person_offset), min(texts_offset))
|
|
etree.SubElement(root, "TextStart").text = str(text_start)
|
|
|
|
#Write the XML file
|
|
txt=etree.tostring(root, encoding="UTF-8", pretty_print=True)
|
|
xml_path = os.path.join(self.story_XML_extract,"XML", self.get_file_name(cab_file_name)+".xml")
|
|
print(xml_path)
|
|
with open(xml_path, "wb") as xmlFile:
|
|
xmlFile.write(txt)
|
|
|
|
|
|
|
|
def extract_From_Struct(self, f,strings_offset, pointer_offset, struct_offset, root):
|
|
|
|
|
|
|
|
#print("Offset: {}".format(hex(struct_offset)))
|
|
f.seek(struct_offset, 0)
|
|
|
|
#Unknown first pointer
|
|
f.read(4)
|
|
|
|
|
|
unknown_pointer = struct.unpack('<I', f.read(4))[0]
|
|
self.create_Entry(root, pointer_offset, unknown_pointer,0, "Struct")
|
|
|
|
person_offset = struct.unpack('<I', f.read(4))[0] + strings_offset
|
|
text_offset = struct.unpack('<I', f.read(4))[0] + strings_offset
|
|
unknown1_offset = struct.unpack('<I', f.read(4))[0] + strings_offset
|
|
unknown2_offset = struct.unpack('<I', f.read(4))[0] + strings_offset
|
|
|
|
|
|
|
|
person_text = self.bytes_to_text(f, person_offset)[0]
|
|
self.create_Entry(root, pointer_offset, person_text,1, "Struct")
|
|
#print("Person offset: {}".format(hex(person_offset)))
|
|
#print("Text offset: {}".format(hex(text_offset)))
|
|
#print("Unknown1 offset: {}".format(hex(unknown1_offset)))
|
|
japText = self.bytes_to_text(f, text_offset)[0]
|
|
self.create_Entry(root, pointer_offset, japText,1, "Struct")
|
|
|
|
unknown1Text = self.bytes_to_text(f, unknown1_offset)[0]
|
|
self.create_Entry(root, pointer_offset, unknown1Text,0, "Struct")
|
|
#print(unknown1Text)
|
|
unknown2Text = self.bytes_to_text(f, unknown2_offset)[0]
|
|
self.create_Entry(root, pointer_offset, unknown2Text,0, "Struct")
|
|
|
|
|
|
self.struct_id += 1
|
|
|
|
return person_offset
|
|
|
|
def extract_From_String(self, f, strings_offset, pointer_offset, text_offset, root):
|
|
|
|
|
|
f.seek(text_offset, 0)
|
|
japText = self.bytes_to_text(f, text_offset)[0]
|
|
self.create_Entry(root, pointer_offset, japText,1, "Other Strings")
|
|
|
|
def extract_Story_Pointers(self, f, bytecode, strings_offset, pointer_block_size):
|
|
|
|
read = 0
|
|
text_offset = []
|
|
pointer_offset = []
|
|
while read < pointer_block_size:
|
|
b = f.read(4)
|
|
if b == bytecode:
|
|
|
|
pointer_offset.append(f.tell())
|
|
text_offset.append(struct.unpack('<I', f.read(4))[0] + strings_offset)
|
|
read += 4
|
|
else:
|
|
read += 4
|
|
|
|
return text_offset, pointer_offset
|
|
|
|
|
|
|
|
def create_Entry(self, strings_node, pointer_offset, text, to_translate, entry_type, speaker_id, unknown_pointer):
|
|
|
|
#Add it to the XML node
|
|
entry_node = etree.SubElement(strings_node, "Entry")
|
|
etree.SubElement(entry_node,"PointerOffset").text = str(pointer_offset)
|
|
etree.SubElement(entry_node,"JapaneseText").text = str(text)
|
|
eng_text = ''
|
|
|
|
if to_translate == 0:
|
|
statusText = 'Done'
|
|
eng_text = str(text)
|
|
|
|
etree.SubElement(entry_node,"EnglishText").text = eng_text
|
|
etree.SubElement(entry_node,"Notes").text = ''
|
|
etree.SubElement(entry_node,"Id").text = str(self.id)
|
|
statusText = "To Do"
|
|
|
|
if entry_type == "Struct":
|
|
etree.SubElement(entry_node,"StructId").text = str(self.struct_id)
|
|
etree.SubElement(entry_node,"UnknownPointer").text = str(unknown_pointer)
|
|
|
|
if to_translate == 1:
|
|
etree.SubElement(entry_node,"SpeakerId").text = str(speaker_id)
|
|
|
|
etree.SubElement(entry_node,"ToTranslate").text = str(to_translate)
|
|
etree.SubElement(entry_node,"Status").text = statusText
|
|
self.id += 1
|
|
|
|
|
|
|
|
# Status for Unknown_Pointer, UnknownText1 and UnknownText2 should always be Done
|
|
if (text == '') or (entry_type == "Struct" and self.id in [1,4,5]) :
|
|
statusText = 'Done'
|
|
else:
|
|
statusText = 'To Do'
|
|
|
|
etree.SubElement(entry_node,"Status").text = statusText
|
|
|
|
self.id += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    def bytes_to_text(self, fileRead, offset=-1, end_strings = b"\x00"):
        """Decode an in-game byte string into readable text using the TBL table.

        :param fileRead: binary stream to read from.
        :param offset: when > 0, seek there first; otherwise decode from the
                       current position.
        :param end_strings: single terminator byte that stops decoding.
        :return: (decoded text, stream position where decoding started).
        """
        final_text = ''
        # NOTE(review): TAGS keys are ints after __init__'s int(k2, 16)
        # conversion, so the .get(b) lookups below use the raw byte value.
        TAGS = self.jsonTblTags['TAGS']

        if (offset > 0):
            fileRead.seek(offset, 0)

        pos = fileRead.tell()
        b = fileRead.read(1)

        while b != end_strings:

            b = ord(b)

            # Normal two-byte character: lead byte in the Shift-JIS-like ranges.
            if (b >= 0x80 and b <= 0x9F) or (b >= 0xE0 and b <= 0xE9):
                c = (b << 8) + ord(fileRead.read(1))

                # TBL keys stay strings (excluded from the int-key conversion
                # in __init__), hence the str(c) lookup.
                try:
                    final_text += (self.jsonTblTags['TBL'][str(c)])
                except KeyError:
                    # Unknown pair: emit both bytes as {XX} placeholders.
                    b_u = (c >> 8) & 0xff
                    b_l = c & 0xff
                    final_text += ("{%02X}" % b_u)
                    final_text += ("{%02X}" % b_l)

            # Line break
            elif b == 0x0A:
                final_text += ("\n")

            # Find a possible Color or Pointer
            elif b == 0x1:

                next_byte = fileRead.read(1)
                ord_byte = ord(next_byte)
                if ord_byte >= 0x1 and ord_byte <= 0x9:

                    tag_name = TAGS.get(b)

                    tags2 = self.jsonTblTags['COLOR']
                    tag_param = tags2.get(ord_byte, None)

                    if tag_param != None:
                        final_text += tag_param
                    else:
                        # Pad the tag to be even number of characters.
                        # BUG: 'b2' is undefined in this branch (it is only
                        # assigned in the else-branch below), so this line
                        # raises NameError when reached -- likely meant
                        # self.hex2(ord_byte). Left as-is pending confirmation.
                        hex_value = self.hex2(b2)
                        final_text += '<{}:{}>'.format(tag_name, hex_value)

                # Not a 1-9 parameter: re-read the 4 bytes as a raw pointer value.
                else:
                    fileRead.seek( fileRead.tell()-2)
                    b2 = struct.unpack("<L", fileRead.read(4))[0]
                    val = ("<%02X:%08X>" % (b, b2))
                    final_text += val

            # Found a name tag: copy Shift-JIS chars up to the closing ')' (0x29),
            # then rewrite (...) as <...>.
            elif b in [0x4, 0x9]:

                val=""
                while fileRead.read(1) != b"\x29":
                    # Step back one byte so the tested byte is decoded too.
                    fileRead.seek(fileRead.tell()-1)
                    val += fileRead.read(1).decode('shift-jis')
                val += ')'
                val = val.replace('(','<').replace(')','>')

                final_text += val

            # Plain printable ASCII passes through unchanged.
            elif chr(b) in self.PRINTABLE_CHARS:
                final_text += chr(b)

            # Half-width katakana range: decode as cp932.
            elif b >= 0xA1 and b < 0xE0:
                final_text += struct.pack("B", b).decode("cp932")

            b = fileRead.read(1)

        return final_text, pos
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_All_Skit(self):
|
|
|
|
print("Extracting Skits")
|
|
path = os.path.join( self.all_extract, 'chat/')
|
|
skitsPath ='../Data/Archives/Skits/'
|
|
self.mkdir(skitsPath)
|
|
|
|
for f in os.listdir(path):
|
|
if os.path.isfile(path + f):
|
|
|
|
#Unpack the CAB into PAK3 file
|
|
fileName = skitsPath + f.replace(".cab", ".pak3")
|
|
subprocess.run(['expand', path + f, fileName])
|
|
|
|
#Decompress using PAKCOMPOSER + Comptoe
|
|
self.pakComposerAndComptoe(fileName, "-d", "-3")
|
|
|
|
def extract_All_Events(self):
|
|
|
|
print("Extract Events")
|
|
path = os.path.join( self.allPathExtract, 'map/')
|
|
eventsPath = '..Data/Archives/Events/'
|
|
self.mkdir(eventsPath)
|
|
|
|
for f in os.listdir(path):
|
|
if os.path.isfile( path + f):
|
|
|
|
#Unpack the CAB into PAK3 file
|
|
fileName = eventsPath + f.replace(".cab", ".pak3")
|
|
subprocess.run(['expand', path + f, fileName])
|
|
|
|
#Decompress using PAKCOMPOSER + Comptoe
|
|
self.pakComposerAndComptoe(fileName, "-d", "-3")
|
|
|
|
# Extract the file all.dat to the different directorties
|
|
def extract_Main_Archive(self):
|
|
|
|
|
|
order = {}
|
|
order['order'] = []
|
|
order_json = open( os.path.join( self.misc, 'order.json'), 'w')
|
|
|
|
#Extract decrypted eboot
|
|
self.extract_Decripted_Eboot()
|
|
|
|
print("Extract All.dat")
|
|
#Open the eboot
|
|
eboot = open( os.path.join( self.misc, 'EBOOT_DEC.BIN'), 'rb')
|
|
eboot.seek(0x1FF624)
|
|
|
|
while True:
|
|
file_info = struct.unpack('<3I', eboot.read(12))
|
|
if(file_info[2] == 0):
|
|
break
|
|
hash_ = '%08X' % file_info[2]
|
|
self.extract_files(file_info[0], file_info[1], hash_)
|
|
order['order'].append(hash_)
|
|
json.dump(order, order_json, indent = 4)
|
|
order_json.close()
|
|
|
|
def extract_Decripted_Eboot(self):
|
|
print("Extracting Eboot")
|
|
args = ["deceboot", "../Data/{}Disc/Original/PSP_GAME/SYSDIR/EBOOT.BIN".format(self.repo_name), "../Data/{}/Misc/EBOOT_DEC.BIN".format(self.repo_name)]
|
|
listFile = subprocess.run(
|
|
args,
|
|
cwd= os.getcwd(),
|
|
)
|
|
|
|
def pakcomposer(action, file_name, working_directory):
|
|
|
|
args = [ "pakcomposer", action, os.path.basename(file_name), "-3", "-x", "-u", "-v"]
|
|
listFile = subprocess.run(
|
|
args,
|
|
cwd=working_directory
|
|
) |