Skeleton impurities (roots, effectors) are now included by default; zaa_reader and msh_reader have been combined into chunked_file_reader; skin and skeleton parentage issues worked out. TODO: Fill material properties on import; decide what to do with SkeletonProperties.

This commit is contained in:
William Herald Snyder 2022-01-07 13:45:50 -05:00
parent c320310084
commit 5eea77adf3
8 changed files with 436 additions and 388 deletions

View File

@ -1,23 +1,22 @@
"""
Basically the same as msh reader but with a couple additional
methods for making TADA easier to navigate and treats the whole
file as an initial dummy chunk to avoid the oddities of SMNA and
to handle both zaa and zaabin.
Reader class for zaabin, zaa, and msh files.
"""
import io
import struct
import os
class ZAAReader:
def __init__(self, file, parent=None, indent=0):
from mathutils import Vector, Quaternion
class Reader:
def __init__(self, file, parent=None, indent=0, debug=False):
self.file = file
self.size: int = 0
self.size_pos = None
self.parent = parent
self.indent = " " * indent #for print debugging
self.indent = " " * indent #for print debugging, should be stored as str so msh_scene_read can access it
self.debug = debug
def __enter__(self):
@ -26,7 +25,7 @@ class ZAAReader:
if self.parent is not None:
self.header = self.read_bytes(4).decode("utf-8")
else:
self.header = "HEAD"
self.header = "FILE"
if self.parent is not None:
self.size = self.read_u32()
@ -36,20 +35,22 @@ class ZAAReader:
padding_length = 4 - (self.size % 4) if self.size % 4 > 0 else 0
self.end_pos = self.size_pos + padding_length + self.size + 8
if self.parent is not None:
print(self.indent + "Begin " + self.header + ", Size: " + str(self.size) + ", Pos: " + str(self.size_pos))
else:
print(self.indent + "Begin head, Size: " + str(self.size) + ", Pos: " + str(self.size_pos))
if self.debug:
if self.parent is not None:
print(self.indent + "Begin " + self.header + ", Size: " + str(self.size) + ", At pos: " + str(self.size_pos))
else:
print(self.indent + "Begin file, Size: " + str(self.size) + ", At pos: " + str(self.size_pos))
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.size > self.MAX_SIZE:
raise OverflowError(f".msh file overflowed max size. size = {self.size} MAX_SIZE = {self.MAX_SIZE}")
raise OverflowError(f"File overflowed max size. size = {self.size} MAX_SIZE = {self.MAX_SIZE}")
if self.debug:
print(self.indent + "End " + self.header)
print(self.indent + "End " + self.header)
self.file.seek(self.end_pos)
@ -103,9 +104,16 @@ class ZAAReader:
return result[0] if num == 1 else result
def read_quat(self):
rot = self.read_f32(4)
return Quaternion((rot[3], rot[0], rot[1], rot[2]))
def read_vec(self):
return Vector(self.read_f32(3))
def read_child(self):
child = ZAAReader(self.file, parent=self, indent=int(len(self.indent) / 2) + 1)
child = Reader(self.file, parent=self, indent=int(len(self.indent) / 2) + 1, debug=self.debug)
return child

View File

@ -240,6 +240,8 @@ def get_is_model_hidden(obj: bpy.types.Object) -> bool:
name = obj.name.lower()
if name.startswith("c_"):
return True
if name.startswith("sv_"):
return True
if name.startswith("p_"):

View File

@ -1,122 +0,0 @@
import io
import struct
from mathutils import Vector, Quaternion
class Reader:
    """Context-managed reader for one chunk of a binary .msh file.

    A chunk begins with a 4-byte ASCII tag followed by a little-endian
    u32 payload size.  ``__enter__`` consumes the tag and size;
    ``__exit__`` seeks just past the chunk so the parent reader can
    continue with the next sibling.  Nested chunks are opened with
    read_child().
    """

    # Largest payload a chunk may declare: signed 32-bit max, minus the
    # 8 bytes occupied by the tag and size fields themselves.
    MAX_SIZE: int = 2147483647 - 8

    def __init__(self, file, parent=None, indent=0, debug=False):
        self.file = file
        self.size: int = 0
        self.size_pos = None            # file offset of this chunk's tag
        self.parent = parent
        self.indent = "  " * indent     # prefix used for debug printing
        self.debug = debug

    def __enter__(self):
        self.size_pos = self.file.tell()
        self.header = self.read_bytes(4).decode("utf-8")
        self.size = self.read_u32()
        self.end_pos = self.size_pos + self.size + 8
        if self.debug:
            print(self.indent + "Begin " + self.header + ", Size: " + str(self.size) + ", Pos: " + str(self.size_pos))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.size > self.MAX_SIZE:
            raise OverflowError(f".msh file overflowed max size. size = {self.size} MAX_SIZE = {self.MAX_SIZE}")
        if self.debug:
            print(self.indent + "End " + self.header)
        # Jump past this chunk regardless of how much of it was consumed.
        self.file.seek(self.end_pos)

    def read_bytes(self, num_bytes):
        return self.file.read(num_bytes)

    def read_string(self):
        # Collect bytes until the NUL terminator, then decode as UTF-8.
        collected = b''
        current = self.read_bytes(1)
        while current[0] != 0x0:
            collected += current
            current = self.read_bytes(1)
        return collected.decode("utf-8")

    def _read_scalars(self, fmt_char, width, num):
        # Shared little-endian unpack helper for the fixed-width readers.
        # Returns a bare scalar for num == 1, a tuple otherwise.
        values = struct.unpack(f"<{num}{fmt_char}", self.read_bytes(num * width))
        return values[0] if num == 1 else values

    def read_i8(self, num=1):
        return self._read_scalars("b", 1, num)

    def read_u8(self, num=1):
        return self._read_scalars("B", 1, num)

    def read_i16(self, num=1):
        return self._read_scalars("h", 2, num)

    def read_u16(self, num=1):
        return self._read_scalars("H", 2, num)

    def read_i32(self, num=1):
        return self._read_scalars("i", 4, num)

    def read_u32(self, num=1):
        return self._read_scalars("I", 4, num)

    def read_f32(self, num=1):
        return self._read_scalars("f", 4, num)

    def read_quat(self):
        # The file stores x, y, z, w; mathutils.Quaternion takes w first.
        x, y, z, w = self.read_f32(4)
        return Quaternion((w, x, y, z))

    def read_vec(self):
        return Vector(self.read_f32(3))

    def read_child(self):
        # Child indent is one level deeper than ours (two spaces/level).
        return Reader(self.file, parent=self, indent=len(self.indent) // 2 + 1, debug=self.debug)

    def skip_bytes(self, num):
        self.file.seek(num, 1)

    def peak_next_header(self):
        # Peek at the next 4 bytes without consuming them.
        peeked = self.read_bytes(4)
        self.file.seek(-4, 1)
        try:
            return peeked.decode("utf-8")
        except:
            # Undecodable bytes ahead: signal "no recognizable header".
            return ""

    def could_have_child(self):
        # A child needs at least 8 bytes (tag + size) of remaining payload.
        return self.end_pos - self.file.tell() >= 8

View File

@ -5,17 +5,34 @@ from typing import Dict
from .msh_scene import Scene
from .msh_model import *
from .msh_material import *
from .msh_reader import Reader
from .msh_utilities import *
from .crc import *
from .chunked_file_reader import Reader
# Current model position
model_counter = 0
mndx_remap = {}
# Used to remap MNDX to the MODL's actual position
mndx_remap : Dict[int, int] = {}
# How much to print
debug_level = 0
def read_scene(input_file, anim_only=False) -> Scene:
'''
Debug levels just indicate how much info should be printed.
0 = nothing
1 = just blurbs about valuable info in the chunks
2 = #1 + full chunk structure
'''
def read_scene(input_file, anim_only=False, debug=0) -> Scene:
global debug_level
debug_level = debug
scene = Scene()
scene.models = []
@ -27,54 +44,58 @@ def read_scene(input_file, anim_only=False) -> Scene:
global model_counter
model_counter = 0
with Reader(file=input_file, debug=True) as hedr:
with Reader(file=input_file, debug=debug_level>0) as head:
while hedr.could_have_child():
head.skip_until("HEDR")
next_header = hedr.peak_next_header()
with head.read_child() as hedr:
if next_header == "MSH2":
while hedr.could_have_child():
with hedr.read_child() as msh2:
next_header = hedr.peak_next_header()
if not anim_only:
materials_list = []
if next_header == "MSH2":
while (msh2.could_have_child()):
with hedr.read_child() as msh2:
next_header = msh2.peak_next_header()
if not anim_only:
materials_list = []
if next_header == "SINF":
with msh2.read_child() as sinf:
pass
while (msh2.could_have_child()):
elif next_header == "MATL":
with msh2.read_child() as matl:
materials_list += _read_matl_and_get_materials_list(matl)
for i,mat in enumerate(materials_list):
scene.materials[mat.name] = mat
next_header = msh2.peak_next_header()
elif next_header == "MODL":
with msh2.read_child() as modl:
scene.models.append(_read_modl(modl, materials_list))
if next_header == "SINF":
with msh2.read_child() as sinf:
pass
else:
msh2.skip_bytes(1)
elif next_header == "MATL":
with msh2.read_child() as matl:
materials_list += _read_matl_and_get_materials_list(matl)
for i,mat in enumerate(materials_list):
scene.materials[mat.name] = mat
elif next_header == "SKL2":
with hedr.read_child() as skl2:
num_bones = skl2.read_u32()
scene.skeleton = [skl2.read_u32(5)[0] for i in range(num_bones)]
elif next_header == "MODL":
with msh2.read_child() as modl:
scene.models.append(_read_modl(modl, materials_list))
elif next_header == "ANM2":
with hedr.read_child() as anm2:
scene.animation = _read_anm2(anm2)
else:
msh2.skip_bytes(1)
else:
hedr.skip_bytes(1)
elif next_header == "SKL2":
with hedr.read_child() as skl2:
num_bones = skl2.read_u32()
scene.skeleton = [skl2.read_u32(5)[0] for i in range(num_bones)]
elif next_header == "ANM2":
with hedr.read_child() as anm2:
scene.animation = _read_anm2(anm2)
if scene.skeleton:
else:
hedr.skip_bytes(1)
# Print models in skeleton
if scene.skeleton and debug_level > 0:
print("Skeleton models: ")
for model in scene.models:
for i in range(len(scene.skeleton)):
@ -84,7 +105,11 @@ def read_scene(input_file, anim_only=False) -> Scene:
scene.skeleton.pop(i)
break
'''
Iterate through every vertex weight in the scene and
change its index to directly reference its bone's index.
It will reference the MNDX of its bone's MODL by default.
'''
for model in scene.models:
if model.geometry:
for seg in model.geometry:
@ -173,7 +198,7 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
index = mndx.read_u32()
global model_counter
print(mndx.indent + "MNDX doesn't match counter, expected: {} found: {}".format(model_counter, index))
#print(mndx.indent + "MNDX doesn't match counter, expected: {} found: {}".format(model_counter, index))
global mndx_remap
mndx_remap[index] = model_counter
@ -203,6 +228,7 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
with modl.read_child() as geom:
while geom.could_have_child():
#print("Searching for next seg or envl child..")
next_header_geom = geom.peak_next_header()
if next_header_geom == "SEGM":
@ -239,7 +265,9 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
else:
modl.skip_bytes(1)
print(modl.indent + "Read model " + model.name + " of type: " + str(model.model_type)[10:])
global debug_level
if debug_level > 0:
print(modl.indent + "Read model " + model.name + " of type: " + str(model.model_type)[10:])
return model
@ -253,7 +281,9 @@ def _read_tran(tran: Reader) -> ModelTransform:
xform.rotation = tran.read_quat()
xform.translation = tran.read_vec()
print(tran.indent + "Rot: {} Loc: {}".format(str(xform.rotation), str(xform.translation)))
global debug_level
if debug_level > 0:
print(tran.indent + "Rot: {} Loc: {}".format(str(xform.rotation), str(xform.translation)))
return xform
@ -301,12 +331,16 @@ def _read_segm(segm: Reader, materials_list: List[Material]) -> GeometrySegment:
geometry_seg.texcoords.append(Vector(uv0l.read_f32(2)))
elif next_header == "NDXL":
with segm.read_child() as ndxl:
pass
'''
num_polygons = ndxl.read_u32()
for _ in range(num_polygons):
polygon = ndxl.read_u16(ndxl.read_u16())
geometry_seg.polygons.append(polygon)
'''
elif next_header == "NDXT":
with segm.read_child() as ndxt:
@ -374,6 +408,7 @@ def _read_segm(segm: Reader, materials_list: List[Material]) -> GeometrySegment:
geometry_seg.weights.append(weight_set)
else:
#print("Skipping...")
segm.skip_bytes(1)
return geometry_seg
@ -390,7 +425,8 @@ def _read_anm2(anm2: Reader) -> Animation:
if next_header == "CYCL":
with anm2.read_child() as cycl:
pass
# Don't even know what CYCL's data does. Tried playing
# with the values but didn't change anything in zenasset or ingame...
'''
num_anims = cycl.read_u32()
@ -399,15 +435,19 @@ def _read_anm2(anm2: Reader) -> Animation:
cycl.skip_bytes(64)
print("CYCL play style {}".format(cycl.read_u32(4)[1]))
'''
pass
elif next_header == "KFR3":
with anm2.read_child() as kfr3:
num_bones = kfr3.read_u32()
bone_crcs = []
for _ in range(num_bones):
bone_crc = kfr3.read_u32()
bone_crcs.append(bone_crc)
frames = ([],[])
@ -423,6 +463,18 @@ def _read_anm2(anm2: Reader) -> Animation:
frames[1].append(RotationFrame(kfr3.read_u32(), kfr3.read_quat()))
anim.bone_frames[bone_crc] = frames
for bone_crc in sorted(bone_crcs):
global debug_level
if debug_level > 0:
print("\t{}: ".format(hex(bone_crc)))
bone_frames = anim.bone_frames[bone_crc]
loc_frames = bone_frames[0]
rot_frames = bone_frames[1]
else:
anm2.skip_bytes(1)

View File

@ -27,9 +27,6 @@ def save_scene(output_file, scene: Scene):
material_index = _write_matl_and_get_material_index(matl, scene)
for index, model in enumerate(scene.models):
#print("Name: {:.10}, Pos: {:15}, Rot: {:15}, Parent: {}".format(model.name, vec_to_str(model.transform.translation), quat_to_str(model.transform.rotation), model.parent))
with msh2.create_child("MODL") as modl:
_write_modl(modl, model, index, material_index, model_index)

View File

@ -9,9 +9,9 @@ from .msh_model import *
class SkeletonProperties(PropertyGroup):
name: StringProperty(name="Name", default="Bone Name")
parent: StringProperty(name="Parent", default="Bone Parent")
loc: FloatVectorProperty(name="Local Position", default=(0.0, 0.0, 0.0), subtype="XYZ", size=3)
rot: FloatVectorProperty(name="Local Rotation", default=(0.0, 0.0, 0.0, 0.0), subtype="QUATERNION", size=4)
#parent: StringProperty(name="Parent", default="Bone Parent")
#loc: FloatVectorProperty(name="Local Position", default=(0.0, 0.0, 0.0), subtype="XYZ", size=3)
#rot: FloatVectorProperty(name="Local Rotation", default=(0.0, 0.0, 0.0, 0.0), subtype="QUATERNION", size=4)
@ -40,12 +40,10 @@ class SkeletonPropertiesPanel(bpy.types.Panel):
skel_props = context.object.data.swbf_msh_skel
layout.label(text = "Bones In MSH Skeleton: ")
for prop in skel_props:
layout.prop(prop, "name")
layout.prop(prop, "parent")
layout.prop(prop, "loc")
layout.prop(prop, "rot")
'''
layout.prop(skel_props, "name")

View File

@ -17,9 +17,8 @@ from .crc import *
import os
def extract_and_apply_anim(filename, scene):
# Extracts and applies anims in the scene to the currently selected armature
def extract_and_apply_anim(filename : str, scene : Scene):
arma = bpy.context.view_layer.objects.active
@ -39,29 +38,26 @@ def extract_and_apply_anim(filename, scene):
arma.animation_data_create()
# Record the starting transforms of each bone. Pose space is relative
# to bones starting transforms. Starting = in edit mode
bone_bind_poses = {}
for bone in arma.data.bones:
bone_obj = bpy.data.objects[bone.name]
bone_obj_parent = bone_obj.parent
bpy.context.view_layer.objects.active = arma
bpy.ops.object.mode_set(mode='EDIT')
bind_mat = bone_obj.matrix_local
stack_mat = Matrix.Identity(4)
for edit_bone in arma.data.edit_bones:
if edit_bone.parent:
bone_local = edit_bone.parent.matrix.inverted() @ edit_bone.matrix
else:
bone_local = arma.matrix_local @ edit_bone.matrix
bone_bind_poses[edit_bone.name] = bone_local.inverted()
while(True):
if bone_obj_parent is None or bone_obj_parent.name in arma.data.bones:
break
bind_mat = bone_obj_parent.matrix_local @ bind_mat
stack_mat = bone_obj_parent.matrix_local @ stack_mat
bone_obj_parent = bone_obj_parent.parent
bone_bind_poses[bone.name] = bind_mat.inverted() @ stack_mat
bpy.ops.object.mode_set(mode='OBJECT')
for bone in arma.pose.bones:
if to_crc(bone.name) in scene.animation.bone_frames:
#print("Inserting anim data for bone: {}".format(bone.name))
bind_mat = bone_bind_poses[bone.name]
@ -85,7 +81,6 @@ def extract_and_apply_anim(filename, scene):
fcurve_rot_y.keyframe_points.insert(i,q.y)
fcurve_rot_z.keyframe_points.insert(i,q.z)
fcurve_loc_x = action.fcurves.new(loc_data_path, index=0, action_group=bone.name)
fcurve_loc_y = action.fcurves.new(loc_data_path, index=1, action_group=bone.name)
fcurve_loc_z = action.fcurves.new(loc_data_path, index=2, action_group=bone.name)
@ -103,58 +98,62 @@ def extract_and_apply_anim(filename, scene):
def parent_object_to_bone(obj, armature, bone_name):
worldmat = obj.matrix_world
obj.parent = None
obj.parent = armature
obj.parent_type = 'BONE'
obj.parent_bone = bone_name
obj.matrix_basis = Matrix()
obj.matrix_parent_inverse = Matrix()
obj.matrix_world = worldmat
'''
Creates armature from the required nodes.
Assumes the required_skeleton is already sorted by parent.
def refined_skeleton_to_armature(refined_skeleton : List[Model], model_map):
Uses model_map to get the world matrix of each bone (hacky, see NOTE)
'''
def required_skeleton_to_armature(required_skeleton : List[Model], model_map : Dict[str, bpy.types.Object], msh_scene : Scene) -> bpy.types.Object:
armature = bpy.data.armatures.new("skeleton")
armature_obj = bpy.data.objects.new("skeleton", armature)
bpy.context.view_layer.active_layer_collection.collection.objects.link(armature_obj)
armature_obj.select_set(True)
preserved = armature_obj.data.swbf_msh_skel
for model in refined_skeleton:
loc,rot,_ = model_map[model.name].matrix_world.decompose()
print(str(loc))
entry = preserved.add()
entry.name = model.name
entry.loc = loc
entry.rot = rot
entry.parent = model.parent
for model in required_skeleton:
if to_crc(model.name) in msh_scene.skeleton:
entry = preserved.add()
entry.name = model.name
#loc,rot,_ = model_map[model.name].matrix_world.decompose()
#entry.loc = loc
#entry.rot = rot
#entry.parent = model.parent
bones_set = set([model.name for model in required_skeleton])
armature_obj.select_set(True)
bpy.context.view_layer.objects.active = armature_obj
bpy.ops.object.mode_set(mode='EDIT')
for bone in refined_skeleton:
for bone in required_skeleton:
edit_bone = armature.edit_bones.new(bone.name)
if bone.parent:
if bone.parent and bone.parent in bones_set:
edit_bone.parent = armature.edit_bones[bone.parent]
'''
NOTE: I recall there being some rare issue with the get_world_matrix utility func.
Never bothered to figure it out and referencing the bone object's world mat always works.
Bone objects will be deleted later.
'''
bone_obj = model_map[bone.name]
edit_bone.matrix = bone_obj.matrix_world
edit_bone.tail = bone_obj.matrix_world @ Vector((0.0,1.0,0.0))
bone_children = [b for b in get_model_children(bone, required_skeleton)]
bone_children = [b for b in get_model_children(bone, refined_skeleton)]
'''
Perhaps we'll add an option for importing bones tip-to-tail, but that would
require preserving their original transforms as changing the tail position
changes the bones' transform...
'''
tail_pos = Vector()
if bone_children:
for bone_child in bone_children:
@ -165,8 +164,6 @@ def refined_skeleton_to_armature(refined_skeleton : List[Model], model_map):
bone_length = .5# edit_bone.parent.length if edit_bone.parent is not None else .5
edit_bone.tail = bone_obj.matrix_world @ Vector((0.0,bone_length,0.0))
bpy.ops.object.mode_set(mode='OBJECT')
armature_obj.select_set(True)
bpy.context.view_layer.update()
@ -176,14 +173,26 @@ def refined_skeleton_to_armature(refined_skeleton : List[Model], model_map):
'''
Ok, so this method is crucial. What this does is:
1) Find all nodes that are weighted to by skinned segments.
2) A node must be included in the armature if:
- It is in SKL2 and is not the scene root
- It is weighted to
- It has a parent and child that must be in the armature
'''
def extract_required_skeleton(scene: Scene) -> List[Model]:
# Will map Model names to Models in scene, for convenience
model_dict : Dict[str, Model] = {}
def extract_refined_skeleton(scene: Scene):
model_dict = {}
skeleton_models = []
# Will contain hashes of all models that definitely need to be in the skeleton/armature.
# We initialize it with the contents of SKL2 i.e. the nodes that are animated.
# For now this includes the scene root, but that'll be excluded later.
skeleton_hashes = set(scene.skeleton)
# We also need to add all nodes that are weighted to. These are not necessarily in
# SKL2, as SKL2 seems to only reference nodes that are keyframed.
for model in scene.models:
model_dict[model.name] = model
@ -194,57 +203,73 @@ def extract_refined_skeleton(scene: Scene):
for weight in weight_set:
model_weighted_to = scene.models[weight.bone]
if to_crc(model_weighted_to.name) not in scene.skeleton:
scene.skeleton.append(to_crc(model_weighted_to.name))
if to_crc(model_weighted_to.name) not in skeleton_hashes:
skeleton_hashes.add(to_crc(model_weighted_to.name))
for model in scene.models:
if to_crc(model.name) in scene.skeleton:
skeleton_models.append(model)
# The result of this function (to be sorted by parent)
required_skeleton_models = []
# Set of nodes to be included in required skeleton/were visited
visited_nodes = set()
refined_skeleton_models = []
'''
Here we add all skeleton nodes (except root) and any necessary ancestors to the armature.
- e.g. in bone_x/eff_x/eff_y, the effectors do not have to be in armature, as they are not ancestors of a bone
- but in bone_x/eff_x/eff_y/bone_y, they do.
'''
for bone in sort_by_parent(scene.models):
for bone in skeleton_models:
# make sure we exclude the scene root and any nodes irrelevant to the armature
if not bone.parent or to_crc(bone.name) not in skeleton_hashes:
continue
if bone.parent:
potential_bones = [bone]
visited_nodes.add(bone.name)
curr_ancestor = model_dict[bone.parent]
stacked_transform = model_transform_to_matrix(bone.transform)
# Stacked transform will be needed if we decide to include an option for excluding effectors/roots
#stacked_transform = model_transform_to_matrix(bone.transform)
while True:
curr_ancestor = model_dict[bone.parent]
if to_crc(curr_ancestor.name) in scene.skeleton or curr_ancestor.name == scene.models[0].name:
new_model = Model()
new_model.name = bone.name
new_model.parent = curr_ancestor.name if curr_ancestor.name != scene.models[0].name else ""
while True:
loc, rot, _ = stacked_transform.decompose()
# If we hit a non-skin scene root, that means we just add the bone we started with, no ancestors.
if not curr_ancestor.parent and curr_ancestor.model_type != ModelType.SKIN:
required_skeleton_models.append(bone)
visited_nodes.add(bone.name)
break
new_model.transform.rotation = rot
new_model.transform.translation = loc
# If we encounter another bone, a skin, or a previously visited object, we need to add the bone and its
# ancestors.
elif to_crc(curr_ancestor.name) in scene.skeleton or curr_ancestor.model_type == ModelType.SKIN or curr_ancestor.name in visited_nodes:
for potential_bone in potential_bones:
required_skeleton_models.append(potential_bone)
visited_nodes.add(potential_bone.name)
break
refined_skeleton_models.append(new_model)
break
# Add ancestor to potential bones, update next ancestor
else:
if curr_ancestor.name not in visited_nodes:
potential_bones.insert(0, curr_ancestor)
curr_ancestor = model_dict[curr_ancestor.parent]
else:
curr_ancestor = model_dict[curr_ancestor.parent]
stacked_transform = model_transform_to_matrix(curr_ancestor.transform) @ stacked_transform
#stacked_transform = model_transform_to_matrix(curr_ancestor.transform) @ stacked_transform
return sort_by_parent(refined_skeleton_models)
return required_skeleton_models
# Create the msh hierarchy. Armatures are not created here.
def extract_models(scene: Scene, materials_map : Dict[str, bpy.types.Material]) -> Dict[str, bpy.types.Object]:
# This will be filled with model names -> Blender objects and returned
model_map : Dict[str, bpy.types.Object] = {}
sorted_models : List[Model] = sort_by_parent(scene.models)
def extract_models(scene: Scene, materials_map):
model_map = {}
for model in sort_by_parent(scene.models):
for model in sorted_models:
new_obj = None
if model.model_type == ModelType.STATIC or model.model_type == ModelType.SKIN:
@ -358,19 +383,19 @@ def extract_models(scene: Scene, materials_map):
return model_map
# TODO: Add to custom material info struct, maybe some material conversion/import?
def extract_materials(folder_path: str, scene: Scene) -> Dict[str, bpy.types.Material]:
def extract_materials(folder_path: str, scene: Scene) -> Dict[str,bpy.types.Material]:
extracted_materials : Dict[str, bpy.types.Material] = {}
extracted_materials = {}
for material_name in scene.materials.keys():
for material_name, material in scene.materials.items():
new_mat = bpy.data.materials.new(name=material_name)
new_mat.use_nodes = True
bsdf = new_mat.node_tree.nodes["Principled BSDF"]
tex_path_def = os.path.join(folder_path, scene.materials[material_name].texture0)
tex_path_alt = os.path.join(folder_path, "PC", scene.materials[material_name].texture0)
tex_path_def = os.path.join(folder_path, material.texture0)
tex_path_alt = os.path.join(folder_path, "PC", material.texture0)
tex_path = tex_path_def if os.path.exists(tex_path_def) else tex_path_alt
@ -379,6 +404,20 @@ def extract_materials(folder_path: str, scene: Scene) -> Dict[str,bpy.types.Mate
texImage.image = bpy.data.images.load(tex_path)
new_mat.node_tree.links.new(bsdf.inputs['Base Color'], texImage.outputs['Color'])
# Fill MaterialProperties datablock
'''
material_properties = new_mat.swbf_msh
material_properties.specular_color = material.specular_color.copy()
material_properties.diffuse_map = material.texture0
result.rendertype = _read_material_props_rendertype(props)
result.flags = _read_material_props_flags(props)
result.data = _read_material_props_data(props)
result.texture1 = _read_normal_map_or_distortion_map_texture(props)
result.texture2 = _read_detail_texture(props)
result.texture3 = _read_envmap_texture(props)
'''
extracted_materials[material_name] = new_mat
return extracted_materials
@ -388,82 +427,112 @@ def extract_materials(folder_path: str, scene: Scene) -> Dict[str,bpy.types.Mate
def extract_scene(filepath: str, scene: Scene):
folder = os.path.join(os.path.dirname(filepath),"")
matmap = extract_materials(folder, scene)
model_map = extract_models(scene, matmap)
# material_map mapes Material names to Blender materials
material_map = extract_materials(folder, scene)
skel = extract_refined_skeleton(scene)
armature = refined_skeleton_to_armature(skel, model_map)
# model_map maps Model names to Blender objects.
model_map = extract_models(scene, material_map)
# skel contains all models needed in an armature
skel = extract_required_skeleton(scene)
# Create the armature if skel is non-empty
armature = None if not skel else required_skeleton_to_armature(skel, model_map, scene)
for bone in armature.data.bones:
bone_local = bone.matrix_local
if bone.parent:
bone_local = bone.parent.matrix_local.inverted() @ bone_local
'''
If an armature was created, we need to do a few extra
things to ensure the import makes sense in Blender. It can
get a bit messy, as XSI + SWBF have very loose requirements
when it comes to skin-skeleton parentage.
bone_obj_local = bpy.data.objects[bone.name].matrix_local
obj_loc, obj_rot, _ = bone_obj_local.decompose()
If not, we're good.
'''
if armature is not None:
loc, rot, _ = bone_local.decompose()
has_skin = False
# Handle armature related parenting
for curr_model in scene.models:
curr_obj = model_map[curr_model.name]
# Parent all skins to armature
if curr_model.model_type == ModelType.SKIN:
has_skin = True
curr_obj.select_set(True)
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
bpy.ops.object.parent_clear(type='CLEAR')
bpy.ops.object.parent_set(type='ARMATURE')
curr_obj.select_set(False)
armature.select_set(False)
bpy.context.view_layer.objects.active = None
# Parent the object to a bone if necessary
else:
if curr_model.parent in armature.data.bones and curr_model.name not in armature.data.bones:
# Some of this is redundant, but necessary...
worldmat = curr_obj.matrix_world
# ''
curr_obj.parent = None
curr_obj.parent = armature
curr_obj.parent_type = 'BONE'
curr_obj.parent_bone = curr_model.parent
# ''
curr_obj.matrix_basis = Matrix()
curr_obj.matrix_parent_inverse = Matrix()
curr_obj.matrix_world = worldmat
'''
Sometimes skins are parented to other skins. We need to find the skin highest in the hierarchy and
parent all skins to its parent (armature_reparent_obj).
If no skin exists, we just reparent the armature to the parent of the highest node in the skeleton
'''
armature_reparent_obj = None
if has_skin:
for model in sort_by_parent(scene.models):
if model.model_type == ModelType.SKIN:
armature_reparent_obj = None if not model.parent else model_map[model.parent]
else:
skeleton_parent_name = skel[0].parent
for model in scene.models:
if model.name == skeleton_parent_name:
armature_reparent_obj = None if not skeleton_parent_name else model_map[skeleton_parent_name]
# Now we reparent the armature to the node (armature_reparent_obj) we just found
if armature_reparent_obj is not None and armature.name != armature_reparent_obj.name:
reparent_obj = None
for model in scene.models:
if model.model_type == ModelType.SKIN:
if model.parent:
reparent_obj = model_map[model.parent]
skin_obj = model_map[model.name]
skin_obj.select_set(True)
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
armature_reparent_obj.select_set(True)
bpy.ops.object.parent_clear(type='CLEAR')
bpy.ops.object.parent_set(type='ARMATURE')
bpy.context.view_layer.objects.active = armature_reparent_obj
bpy.ops.object.parent_set(type='OBJECT')
skin_obj.select_set(False)
armature.select_set(False)
armature_reparent_obj.select_set(False)
bpy.context.view_layer.objects.active = None
if armature is not None:
for bone in armature.data.bones:
for model in scene.models:
if model.parent in armature.data.bones and model.model_type != ModelType.NULL:
pass#parent_object_to_bone(model_map[model.name], armature, model.parent)
'''
if reparent_obj is not None and armature.name != reparent_obj.name:
# If an bone exists in the armature, delete its
# object counterpart (as created in extract_models)
for bone in skel:
model_to_remove = model_map[bone.name]
if model_to_remove:
bpy.data.objects.remove(model_to_remove, do_unlink=True)
model_map.pop(bone.name)
armature.select_set(True)
reparent_obj.select_set(True)
bpy.context.view_layer.objects.active = reparent_obj
bpy.ops.object.parent_set(type='OBJECT')
armature.select_set(False)
reparent_obj.select_set(False)
bpy.context.view_layer.objects.active = None
'''
# Lastly, hide all that is hidden in the msh scene
for model in scene.models:
if model.name in bpy.data.objects:
obj = bpy.data.objects[model.name]
if get_is_model_hidden(obj) and len(obj.children) == 0 and model.model_type != ModelType.NULL:
if model.name in model_map:
obj = model_map[model.name]
if get_is_model_hidden(obj) and len(obj.children) == 0:
obj.hide_set(True)

View File

@ -10,7 +10,7 @@ import os
import bpy
import re
from .zaa_reader import *
from .chunked_file_reader import Reader
from .crc import *
from .msh_model import *
@ -20,20 +20,25 @@ from .msh_utilities import *
from typing import List, Set, Dict, Tuple
debug = False
#anims #bones #comps #keyframes: index,value
#anims #bones #components #keyframes: index,value
def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]]:
global debug
decompressed_anims: Dict[int, Dict[int, List[ Dict[int,float]]]] = {}
with ZAAReader(input_file) as head:
with Reader(input_file, debug=debug) as head:
# Don't read SMNA as a child, since it has a length field always set to 0...
head.skip_until("SMNA")
head.skip_bytes(20)
num_anims = head.read_u16()
#print("\nFile contains {} animations\n".format(num_anims))
if debug:
print("\nFile contains {} animations\n".format(num_anims))
head.skip_bytes(2)
@ -92,7 +97,8 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
transBitFlags = anim_metadata[anim_crc]["transBitFlags"]
#print("\n\tAnim hash: {} Num frames: {} Num joints: {}".format(hex(anim_crc), num_frames, num_bones))
if debug:
print("\n\tAnim hash: {} Num frames: {} Num joints: {}".format(hex(anim_crc), num_frames, num_bones))
for bone_num, bone_crc in enumerate(anim_metadata[anim_crc]["bone_list"]):
@ -103,8 +109,9 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
offsets_list = params_bone["rot_offsets"] + params_bone["loc_offsets"]
qparams = params_bone["qparams"]
#print("\n\t\tBone #{} hash: {}".format(bone_num,hex(bone_crc)))
#print("\n\t\tQParams: {}, {}, {}, {}".format(*qparams))
if debug:
print("\n\t\tBone #{} hash: {}".format(bone_num,hex(bone_crc)))
print("\n\t\tQParams: {}, {}, {}, {}".format(*qparams))
for o, start_offset in enumerate(offsets_list):
@ -125,17 +132,14 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
# a single multiplier for all three
else:
if (0x00000001 << bone_num) & transBitFlags == 0:
bone_curves.append(None)
continue
mult = qparams[-1]
bias = qparams[o - 4]
#print("\n\t\t\tBias = {}, multiplier = {}".format(bias, mult))
if debug:
print("\n\t\t\tBias = {}, multiplier = {}".format(bias, mult))
#print("\n\t\t\tOffset {}: {} ({}, {} remaining)".format(o,start_offset, tada.get_current_pos(), tada.how_much_left(tada.get_current_pos())))
if debug:
print("\n\t\t\tOffset {}: {} ({}, {} remaining)".format(o,start_offset, tada.get_current_pos(), tada.how_much_left(tada.get_current_pos())))
# Skip to start of compressed data for component, as specified in TNJA
tada.skip_bytes(start_offset)
@ -146,7 +150,9 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
accumulator = bias + mult * tada.read_i16()
curve[j if j < num_frames else num_frames] = accumulator
#print("\t\t\t\t{}: {}".format(j, accumulator))
if debug:
print("\t\t\t\t{}: {}".format(j, accumulator))
j+=1
while (j < num_frames):
@ -155,13 +161,15 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
# Reset the accumulator to next dequantized i16
if control == -0x7f:
#print("\t\t\t\tControl: READING NEXT FRAME")
if debug:
print("\t\t\t\tControl: READING NEXT FRAME")
break
# RLE: hold current accumulator for the next u8 frames
elif control == -0x80:
num_skips = tada.read_u8()
#print("\t\t\t\tControl: HOLDING FOR {} FRAMES".format(num_skips))
if debug:
print("\t\t\t\tControl: HOLDING FOR {} FRAMES".format(num_skips))
j += num_skips
# If not a special value, increment accumulator by the dequantized i8
@ -169,8 +177,8 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
else:
accumulator += mult * float(control)
curve[j if j < num_frames else num_frames] = accumulator
#print("\t\t\t\t{}: {}".format(j, accumulator))
if debug:
print("\t\t\t\t{}: {}".format(j, accumulator))
j+=1
curve[num_frames - 1] = accumulator
@ -217,6 +225,8 @@ for now this will work ONLY if the model was directly imported from a .msh file.
def extract_and_apply_munged_anim(input_file_path):
global debug
with open(input_file_path,"rb") as input_file:
animation_set = decompress_curves(input_file)
@ -244,23 +254,37 @@ def extract_and_apply_munged_anim(input_file_path):
This will be replaced with the eventual importer release.
"""
animated_bones = set()
for anim_crc in animation_set:
for bone_crc in animation_set[anim_crc]:
animated_bones.add(bone_crc)
bpy.context.view_layer.objects.active = arma
bpy.ops.object.mode_set(mode='EDIT')
bone_bind_poses = {}
for bone in arma.data.bones:
bone_obj = bpy.data.objects[bone.name]
bone_obj_parent = bone_obj.parent
for edit_bone in arma.data.edit_bones:
if to_crc(edit_bone.name) not in animated_bones:
continue
bind_mat = bone_obj.matrix_local
curr_ancestor = edit_bone.parent
while curr_ancestor is not None and to_crc(curr_ancestor.name) not in animated_bones:
curr_ancestor = curr_ancestor.parent
while(True):
if bone_obj_parent is None or bone_obj_parent.name in arma.data.bones:
break
bind_mat = bone_obj_parent.matrix_local @ bind_mat
bone_obj_parent = bone_obj_parent.parent
if curr_ancestor:
bind_mat = curr_ancestor.matrix.inverted() @ edit_bone.matrix
else:
bind_mat = arma.matrix_local @ edit_bone.matrix
bone_bind_poses[bone.name] = bind_mat.inverted()
bone_bind_poses[edit_bone.name] = bind_mat.inverted()
bpy.ops.object.mode_set(mode='OBJECT')
if debug:
print("Extracting {} animations from {}:".format(len(animation_set), input_file_path))
for anim_crc in animation_set:
@ -270,16 +294,30 @@ def extract_and_apply_munged_anim(input_file_path):
else:
anim_str = str(hex(anim_crc))
if anim_str in bpy.data.actions:
bpy.data.actions[anim_str].use_fake_user = False
bpy.data.actions.remove(bpy.data.actions[anim_str])
if debug:
print("\tExtracting anim {}:".format(anim_str))
#if anim_str in bpy.data.actions:
# bpy.data.actions[anim_str].use_fake_user = False
# bpy.data.actions.remove(bpy.data.actions[anim_str])
action = bpy.data.actions.new(anim_str)
action.use_fake_user = True
animation = animation_set[anim_crc]
for bone in arma.pose.bones:
bone_crcs_list = [bone_crc_ for bone_crc_ in animation]
for bone_crc in sorted(bone_crcs_list):
bone_name = next((bone.name for bone in arma.pose.bones if to_crc(bone.name) == bone_crc), None)
if bone_name is None:
continue
bone = arma.pose.bones[bone_name]
bone_crc = to_crc(bone.name)
if bone_crc not in animation:
@ -294,7 +332,8 @@ def extract_and_apply_munged_anim(input_file_path):
has_translation = bone_curves[4] is not None
#print("\t\tNum frames: " + str(num_frames))
if debug:
print("\t\tBone {} has {} frames: ".format(bone_name, num_frames))
last_values = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
@ -330,7 +369,6 @@ def extract_and_apply_munged_anim(input_file_path):
return v if has_key else None
fcurve_rot_w = action.fcurves.new(rot_data_path, index=0, action_group=bone.name)
fcurve_rot_x = action.fcurves.new(rot_data_path, index=1, action_group=bone.name)
fcurve_rot_y = action.fcurves.new(rot_data_path, index=2, action_group=bone.name)
@ -346,6 +384,9 @@ def extract_and_apply_munged_anim(input_file_path):
q = get_quat(frame)
if q is not None:
if debug:
print("\t\t\tRot key: ({}, {})".format(frame, quat_to_str(q)))
# Very bloated, but works for now
q = (bind_mat @ convert_rotation_space(q).to_matrix().to_4x4()).to_quaternion()
fcurve_rot_w.keyframe_points.insert(frame,q.w)
@ -358,6 +399,9 @@ def extract_and_apply_munged_anim(input_file_path):
t = get_vec(frame)
if t is not None:
if debug:
print("\t\t\tPos key: ({}, {})".format(frame, vec_to_str(t)))
t = (bind_mat @ Matrix.Translation(convert_vector_space(t))).translation
fcurve_loc_x.keyframe_points.insert(frame,t.x)