Fill material properties upon import + if SKL2 missing, assume all BONE models are in it + SkeletonProperties only relevant for imported meshes + chunked_file_reader no longer assumes 4-byte alignment

William Herald Snyder 2022-01-11 14:02:17 -05:00
parent 5eea77adf3
commit 7244446dd9
6 changed files with 201 additions and 94 deletions

View File

@ -25,21 +25,18 @@ class Reader:
if self.parent is not None:
self.header = self.read_bytes(4).decode("utf-8")
else:
self.header = "FILE"
self.header = "File"
if self.parent is not None:
self.size = self.read_u32()
else:
self.size = os.path.getsize(self.file.name) - 8
padding_length = 4 - (self.size % 4) if self.size % 4 > 0 else 0
self.end_pos = self.size_pos + padding_length + self.size + 8
# No padding to multiples of 4. Files exported from XSI via zetools do not align by 4!
self.end_pos = self.size_pos + self.size + 8
if self.debug:
if self.parent is not None:
print(self.indent + "Begin " + self.header + ", Size: " + str(self.size) + ", At pos: " + str(self.size_pos))
else:
print(self.indent + "Begin file, Size: " + str(self.size) + ", At pos: " + str(self.size_pos))
print("{}Begin {} of Size {} at pos {}:".format(self.indent, self.header, self.size, self.size_pos))
return self
@ -49,7 +46,7 @@ class Reader:
raise OverflowError(f"File overflowed max size. size = {self.size} MAX_SIZE = {self.MAX_SIZE}")
if self.debug:
print(self.indent + "End " + self.header)
print("{}End {} at pos: {}".format(self.indent, self.header, self.end_pos))
self.file.seek(self.end_pos)
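To make the new end-of-chunk math concrete, here is a minimal standalone sketch (assuming, as in the Reader above, that size_pos marks the start of the 4-byte header tag, followed by a u32 size and then the raw payload with no padding to a multiple of 4):

import struct

def chunk_end_pos(size_pos: int, size: int) -> int:
    # 8 bytes cover the header tag plus the u32 size field; the payload is
    # taken as-is, with no rounding of `size` up to a multiple of 4.
    return size_pos + size + 8

def read_chunk_header(f):
    # Minimal sketch, not the addon's Reader class: read one tag + size pair
    # and report where the chunk ends, so the caller can seek past it.
    size_pos = f.tell()
    tag = f.read(4).decode("utf-8")
    size = struct.unpack("<I", f.read(4))[0]
    return tag, size, chunk_end_pos(size_pos, size)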

View File

@ -0,0 +1,133 @@
""" For finding textures and assigning material properties from entries in a Material """
import bpy
from typing import Dict
from .msh_material import *
from .msh_material_gather import *
from .msh_material_properties import *
import os
from math import sqrt
def find_texture_path(folder_path : str, name : str) -> str:
if not folder_path or not name:
return ""
possible_paths = [
os.path.join(folder_path, name),
os.path.join(folder_path, "PC", name),
os.path.join(folder_path, "pc", name),
os.path.join(folder_path, ".." , name),
]
for possible_path in possible_paths:
if os.path.exists(possible_path):
return possible_path
return ""
def fill_material_props(material : Material, material_properties):
""" Fills MaterialProperties from Material instance """
if material_properties is None or material is None:
return
material_properties.specular_color = (material.specular_color[0], material.specular_color[1], material.specular_color[2])
material_properties.diffuse_map = material.texture0
_fill_material_props_rendertype(material, material_properties)
_fill_material_props_flags(material, material_properties)
_fill_material_props_data(material, material_properties)
_fill_normal_map_or_distortion_map_texture(material, material_properties)
_fill_detail_texture(material, material_properties)
_fill_envmap_texture(material, material_properties)
def _fill_material_props_rendertype(material, material_properties):
_REVERSE_RENDERTYPES_MAPPING = {
Rendertype.NORMAL : "NORMAL_BF2",
Rendertype.SCROLLING : "SCROLLING_BF2",
Rendertype.ENVMAPPED : "ENVMAPPED_BF2",
Rendertype.ANIMATED : "ANIMATED_BF2",
Rendertype.REFRACTION : "REFRACTION_BF2",
Rendertype.BLINK : "BLINK_BF2",
Rendertype.NORMALMAPPED_TILED : "NORMALMAPPED_TILED_BF2",
Rendertype.NORMALMAPPED_ENVMAPPED : "NORMALMAPPED_ENVMAPPED_BF2",
Rendertype.NORMALMAPPED : "NORMALMAPPED_BF2",
Rendertype.NORMALMAPPED_TILED_ENVMAP : "NORMALMAPPED_TILED_ENVMAPPED_BF2"}
material_properties.rendertype = _REVERSE_RENDERTYPES_MAPPING[material.rendertype]
def _fill_material_props_flags(material, material_properties):
if material.rendertype == Rendertype.REFRACTION:
material_properties.blended_transparency = True
return
flags = material.flags
material_properties.blended_transparency = bool(flags & MaterialFlags.BLENDED_TRANSPARENCY)
material_properties.additive_transparency = bool(flags & MaterialFlags.ADDITIVE_TRANSPARENCY)
material_properties.hardedged_transparency = bool(flags & MaterialFlags.HARDEDGED_TRANSPARENCY)
material_properties.unlit = bool(flags & MaterialFlags.UNLIT)
material_properties.glow = bool(flags & MaterialFlags.GLOW)
material_properties.perpixel = bool(flags & MaterialFlags.PERPIXEL)
material_properties.specular = bool(flags & MaterialFlags.SPECULAR)
material_properties.doublesided = bool(flags & MaterialFlags.DOUBLESIDED)
def _fill_material_props_data(material, material_properties):
if material.rendertype == Rendertype.SCROLLING:
material_properties.scroll_speed_u = material.data[0]
material_properties.scroll_speed_v = material.data[1]
elif material.rendertype == Rendertype.BLINK:
material_properties.blink_min_brightness = material.data[0]
material_properties.blink_speed = material.data[1]
elif material.rendertype == Rendertype.NORMALMAPPED_TILED_ENVMAP or material.rendertype == Rendertype.NORMALMAPPED_TILED:
material_properties.normal_map_tiling_u = material.data[0]
material_properties.normal_map_tiling_v = material.data[1]
elif material.rendertype == Rendertype.REFRACTION:
pass
elif material.rendertype == Rendertype.ANIMATED:
animation_length_index = int(sqrt(material.data[0])) - 1  # data[0] is assumed to hold the animated grid's frame count
if animation_length_index < 0:
animation_length_index = 0
elif animation_length_index >= len(UI_MATERIAL_ANIMATION_LENGTHS):
animation_length_index = len(UI_MATERIAL_ANIMATION_LENGTHS) - 1
material_properties.animation_length = UI_MATERIAL_ANIMATION_LENGTHS[animation_length_index]
material_properties.animation_speed = material.data[1]
else:
material_properties.detail_map_tiling_u = material.data[0]
material_properties.detail_map_tiling_v = material.data[1]
def _fill_normal_map_or_distortion_map_texture(material, material_properties):
if material.rendertype == Rendertype.REFRACTION:
material_properties.distortion_map = material.texture1
elif material.rendertype.value > 24:
material_properties.normal_map = material.texture1
def _fill_detail_texture(material, material_properties):
if material.rendertype != Rendertype.REFRACTION:
material_properties.detail_map = material.texture2
def _fill_envmap_texture(material, material_properties):
if material.rendertype != Rendertype.ENVMAPPED:
material_properties.environment_map = material.texture3
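A rough usage sketch for these helpers from the importer side (the folder and texture names are made-up examples, parsed_material stands in for a Material returned by the msh reader, and blender_mat.swbf_msh is the MaterialProperties group the addon attaches to Blender materials):

folder_path = "/path/to/world/msh"                    # assumed example location of the .msh
diffuse = find_texture_path(folder_path, "tree.tga")  # "" if not beside the msh, in PC/pc, or one level up

blender_mat = bpy.data.materials.new("tree_mat")
fill_material_props(parsed_material, blender_mat.swbf_msh)  # parsed_material: a Material read from the .msh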

View File

@ -110,13 +110,27 @@ def read_scene(input_file, anim_only=False, debug=0) -> Scene:
change its index to directly reference its bone's index.
It will reference the MNDX of its bone's MODL by default.
'''
for model in scene.models:
if model.geometry:
for seg in model.geometry:
if seg.weights:
for weight_set in seg.weights:
for vweight in weight_set:
if vweight.bone in mndx_remap:
vweight.bone = mndx_remap[vweight.bone]
else:
vweight.bone = 0
# So in the new republic boba example, the weights aimed for bone_head instead map to sv_jettrooper...
#for key, val in mndx_remap.items():
#if scene.models[val].name == "bone_head" or scene.models[val].name == "sv_jettrooper":
#print("Key: {} is mapped to val: {}".format(key, val))
#print("Key: {}, val {} is model: {}".format(key, val, scene.models[val].name))
return scene
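In plainer form, the remapping above does the following (simplified dict/list stand-ins rather than the addon's Scene and VertexWeight classes):

# mndx_remap maps the MNDX value stored in the file to the position of the
# corresponding MODL in scene.models, assigned in read order.
mndx_remap = {1: 0, 2: 1, 5: 2}

# Each vertex weight initially references its bone by MNDX; unknown values
# fall back to index 0, as in the loop above.
weights = [{"bone": 5, "weight": 1.0}, {"bone": 9, "weight": 1.0}]
for vweight in weights:
    vweight["bone"] = mndx_remap.get(vweight["bone"], 0)
# bones become 2 and 0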
@ -155,8 +169,8 @@ def _read_matd(matd: Reader) -> Material:
elif next_header == "ATRB":
with matd.read_child() as atrb:
mat.flags = atrb.read_u8()
mat.rendertype = atrb.read_u8()
mat.flags = MaterialFlags(atrb.read_u8())
mat.rendertype = Rendertype(atrb.read_u8())
mat.data = atrb.read_u8(2)
elif next_header == "TX0D":
@ -197,8 +211,10 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
with modl.read_child() as mndx:
index = mndx.read_u32()
global model_counter
#print(mndx.indent + "MNDX doesn't match counter, expected: {} found: {}".format(model_counter, index))
#print("Model counter: {} MNDX: {}".format(model_counter, index))
global mndx_remap
mndx_remap[index] = model_counter
@ -228,7 +244,6 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
with modl.read_child() as geom:
while geom.could_have_child():
#print("Searching for next seg or envl child..")
next_header_geom = geom.peak_next_header()
if next_header_geom == "SEGM":
@ -242,16 +257,12 @@ def _read_modl(modl: Reader, materials_list: List[Material]) -> Model:
else:
geom.skip_bytes(1)
#with geom.read_child() as null:
#pass
for seg in model.geometry:
if seg.weights and envelope:
for weight_set in seg.weights:
for i in range(len(weight_set)):
vertex_weight = weight_set[i]
index = vertex_weight.bone
weight_set[i] = VertexWeight(vertex_weight.weight, envelope[vertex_weight.bone])
for vertex_weight in weight_set:
vertex_weight.bone = envelope[vertex_weight.bone]
elif next_header == "SWCI":
prim = CollisionPrimitive()
@ -348,7 +359,7 @@ def _read_segm(segm: Reader, materials_list: List[Material]) -> GeometrySegment:
for _ in range(num_tris):
geometry_seg.triangles.append(ndxt.read_u16(3))
#
elif next_header == "STRP":
strips : List[List[int]] = []
@ -408,7 +419,6 @@ def _read_segm(segm: Reader, materials_list: List[Material]) -> GeometrySegment:
geometry_seg.weights.append(weight_set)
else:
#print("Skipping...")
segm.skip_bytes(1)
return geometry_seg
@ -467,10 +477,6 @@ def _read_anm2(anm2: Reader) -> Animation:
for bone_crc in sorted(bone_crcs):
global debug_level
if debug_level > 0:
print("\t{}: ".format(hex(bone_crc)))
bone_frames = anim.bone_frames[bone_crc]
loc_frames = bone_frames[0]
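For the ENVL change above, a simplified illustration (plain lists instead of the addon's GeometrySegment/VertexWeight classes) of how segment-local weight indices are resolved through the envelope into model indices before read_scene applies the MNDX remap:

# ENVL lists, per model, which models the segment weights refer to.
envelope = [7, 12, 3]          # local weight index 0 -> model 7, etc.

weight_set = [{"bone": 0, "weight": 0.75}, {"bone": 2, "weight": 0.25}]
for vertex_weight in weight_set:
    # mutate in place, as the new loop does, instead of rebuilding each VertexWeight
    vertex_weight["bone"] = envelope[vertex_weight["bone"]]
# bones become 7 and 3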

View File

@ -9,11 +9,6 @@ from .msh_model import *
class SkeletonProperties(PropertyGroup):
name: StringProperty(name="Name", default="Bone Name")
#parent: StringProperty(name="Parent", default="Bone Parent")
#loc: FloatVectorProperty(name="Local Position", default=(0.0, 0.0, 0.0), subtype="XYZ", size=3)
#rot: FloatVectorProperty(name="Local Rotation", default=(0.0, 0.0, 0.0, 0.0), subtype="QUATERNION", size=4)
@ -29,7 +24,7 @@ class SkeletonPropertiesPanel(bpy.types.Panel):
@classmethod
def poll(cls, context):
return context.object.type == 'ARMATURE'
return context.object.type == 'ARMATURE' and context.object.data.swbf_msh_skel and len(context.object.data.swbf_msh_skel) > 0
def draw(self, context):
@ -45,13 +40,4 @@ class SkeletonPropertiesPanel(bpy.types.Panel):
for prop in skel_props:
layout.prop(prop, "name")
'''
layout.prop(skel_props, "name")
layout.prop(skel_props, "parent")
layout.prop(skel_props, "loc")
layout.prop(skel_props, "rot")
'''
#self.layout.label(text=context.object.swbf_msh_skel.yolo[1])
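For context on the stricter poll() above: swbf_msh_skel is a collection that only the importer fills, so the panel now stays hidden for armatures the user created themselves. A hedged sketch of how such a per-armature collection is typically registered (the property name comes from this diff; the registration code itself is not shown in the commit and is assumed):

import bpy
from bpy.props import CollectionProperty

def register():
    # SkeletonProperties is the PropertyGroup defined above.
    bpy.utils.register_class(SkeletonProperties)
    # Assumed registration target: poll() reads context.object.data.swbf_msh_skel,
    # so the collection must live on the Armature datablock.
    bpy.types.Armature.swbf_msh_skel = CollectionProperty(type=SkeletonProperties)

On import, one entry is added per preserved bone (entry = armature.data.swbf_msh_skel.add(); entry.name = model.name), which is what makes the poll() check pass.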

View File

@ -8,6 +8,7 @@ from enum import Enum
from typing import List, Set, Dict, Tuple
from itertools import zip_longest
from .msh_scene import Scene
from .msh_material_to_blend import *
from .msh_model import *
from .msh_model_utilities import *
from .msh_utilities import *
@ -118,10 +119,6 @@ def required_skeleton_to_armature(required_skeleton : List[Model], model_map : D
if to_crc(model.name) in msh_scene.skeleton:
entry = preserved.add()
entry.name = model.name
#loc,rot,_ = model_map[model.name].matrix_world.decompose()
#entry.loc = loc
#entry.rot = rot
#entry.parent = model.parent
bones_set = set([model.name for model in required_skeleton])
@ -186,17 +183,29 @@ def extract_required_skeleton(scene: Scene) -> List[Model]:
# Will map Model names to Models in scene, for convenience
model_dict : Dict[str, Model] = {}
# Will contain hashes of all models that definitely need to be in the skeleton/armature.
# We initialize it with the contents of SKL2 i.e. the nodes that are animated.
# For now this includes the scene root, but that'll be excluded later.
'''
Will contain hashes of all models that definitely need to be in the skeleton/armature.
We initialize it with the contents of SKL2 i.e. the nodes that are animated.
For now this includes the scene root, but that'll be excluded later.
'''
skeleton_hashes = set(scene.skeleton)
# We also need to add all nodes that are weighted to. These are not necessarily in
# SKL2, as SKL2 seems to only reference nodes that are keyframed.
'''
We also need to add all nodes that are weighted to. These are not necessarily in
SKL2, as SKL2 seems to only reference nodes that are keyframed.
However, sometimes SKL2 is not included when it should be, but it can be mostly recovered
by checking which models are BONEs.
'''
for model in scene.models:
model_dict[model.name] = model
if model.geometry:
#if to_crc(model.name) in scene.skeleton:
print("Skel model {} of type {} has parent {}".format(model.name, model.model_type, model.parent))
if model.model_type == ModelType.BONE:
skeleton_hashes.add(to_crc(model.name))
elif model.geometry:
for seg in model.geometry:
if seg.weights:
for weight_set in seg.weights:
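Condensed, the selection rule introduced above amounts to the following sketch (a plain function over the same Scene/Model types; the weighted-to branch is reconstructed from the truncated hunk and its comments, so treat it as an approximation):

def gather_skeleton_hashes(scene):
    # Start from SKL2, i.e. the keyframed nodes. If SKL2 was omitted from the
    # file, this starts empty and the BONE check below recovers most of it.
    hashes = set(scene.skeleton)
    for model in scene.models:
        if model.model_type == ModelType.BONE:
            hashes.add(to_crc(model.name))
        elif model.geometry:
            # Any node a segment is weighted to must also end up in the armature,
            # since SKL2 only references keyframed nodes.
            for seg in model.geometry:
                for weight_set in (seg.weights or []):
                    for weight in weight_set:
                        hashes.add(to_crc(scene.models[weight.bone].name))
    return hashes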
@ -346,6 +355,7 @@ def extract_models(scene: Scene, materials_map : Dict[str, bpy.types.Material])
if index not in vertex_groups_indicies:
model_name = scene.models[index].name
#print("Adding new vertex group with index {} and model name {}".format(index, model_name))
vertex_groups_indicies[index] = new_obj.vertex_groups.new(name=model_name)
vertex_groups_indicies[index].add([offset + i], weight.weight, 'ADD')
@ -394,29 +404,14 @@ def extract_materials(folder_path: str, scene: Scene) -> Dict[str, bpy.types.Mat
new_mat.use_nodes = True
bsdf = new_mat.node_tree.nodes["Principled BSDF"]
tex_path_def = os.path.join(folder_path, material.texture0)
tex_path_alt = os.path.join(folder_path, "PC", material.texture0)
diffuse_texture_path = find_texture_path(folder_path, material.texture0)
tex_path = tex_path_def if os.path.exists(tex_path_def) else tex_path_alt
if os.path.exists(tex_path):
if diffuse_texture_path:
texImage = new_mat.node_tree.nodes.new('ShaderNodeTexImage')
texImage.image = bpy.data.images.load(tex_path)
texImage.image = bpy.data.images.load(diffuse_texture_path)
new_mat.node_tree.links.new(bsdf.inputs['Base Color'], texImage.outputs['Color'])
# Fill MaterialProperties datablock
'''
material_properties = new_mat.swbf_msh
material_properties.specular_color = material.specular_color.copy()
material_properties.diffuse_map = material.texture0
result.rendertype = _read_material_props_rendertype(props)
result.flags = _read_material_props_flags(props)
result.data = _read_material_props_data(props)
result.texture1 = _read_normal_map_or_distortion_map_texture(props)
result.texture2 = _read_detail_texture(props)
result.texture3 = _read_envmap_texture(props)
'''
fill_material_props(material, new_mat.swbf_msh)
extracted_materials[material_name] = new_mat
@ -434,6 +429,7 @@ def extract_scene(filepath: str, scene: Scene):
# model_map maps Model names to Blender objects.
model_map = extract_models(scene, material_map)
# skel contains all models needed in an armature
skel = extract_required_skeleton(scene)
@ -463,24 +459,16 @@ def extract_scene(filepath: str, scene: Scene):
has_skin = True
curr_obj.select_set(True)
armature.select_set(True)
bpy.context.view_layer.objects.active = armature
bpy.ops.object.parent_clear(type='CLEAR')
bpy.ops.object.parent_set(type='ARMATURE')
curr_obj.select_set(False)
armature.select_set(False)
bpy.context.view_layer.objects.active = None
curr_obj.parent = armature
curr_obj.parent_type = 'ARMATURE'
# Parent the object to a bone if necessary
else:
if curr_model.parent in armature.data.bones and curr_model.name not in armature.data.bones:
# Some of this is redundant, but necessary...
# Not sure what the different mats do, but saving the worldmat and
# applying it after clearing the other mats yields correct results...
worldmat = curr_obj.matrix_world
# ''
curr_obj.parent = None
curr_obj.parent = armature
curr_obj.parent_type = 'BONE'
curr_obj.parent_bone = curr_model.parent
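The bone-parenting branch above is cut off by the hunk; a hedged sketch of the pattern its comments describe (re-applying the saved matrix_world after reparenting is inferred from the comment, not shown in the diff):

# Parent a mesh object to a specific bone while keeping its world transform.
worldmat = curr_obj.matrix_world.copy()   # copy() so later edits don't change it

curr_obj.parent = None
curr_obj.parent = armature
curr_obj.parent_type = 'BONE'
curr_obj.parent_bone = curr_model.parent

# Bone parenting offsets the object relative to the bone, so restore the
# remembered world transform afterwards.
curr_obj.matrix_world = worldmat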
@ -506,19 +494,9 @@ def extract_scene(filepath: str, scene: Scene):
if model.name == skeleton_parent_name:
armature_reparent_obj = None if not skeleton_parent_name else model_map[skeleton_parent_name]
# Now we reparent the armature to the node (armature_reparent_obj) we just found
if armature_reparent_obj is not None and armature.name != armature_reparent_obj.name:
armature.select_set(True)
armature_reparent_obj.select_set(True)
bpy.context.view_layer.objects.active = armature_reparent_obj
bpy.ops.object.parent_set(type='OBJECT')
armature.select_set(False)
armature_reparent_obj.select_set(False)
bpy.context.view_layer.objects.active = None
armature.parent = armature_reparent_obj
# If a bone exists in the armature, delete its

View File

@ -45,6 +45,8 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
anim_crcs = []
anim_metadata = {}
head.skip_until("MINA")
# Read metadata (crc, num frames, num bones) for each anim
with head.read_child() as mina:
@ -62,6 +64,9 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
"transBitFlags" : transBitFlags,
}
head.skip_until("TNJA")
# Read TADA offsets and quantization parameters for each rot + loc component, for each bone, for each anim
with head.read_child() as tnja:
@ -85,6 +90,8 @@ def decompress_curves(input_file) -> Dict[int, Dict[int, List[ Dict[int,float]]]
anim_metadata[anim_crc]["bone_params"] = bone_params
anim_metadata[anim_crc]["bone_list"] = bone_list
head.skip_until("TADA")
# Decompress/dequantize frame data into discrete per-component curves
with head.read_child() as tada:
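skip_until is called above but not defined in these hunks; a plausible sketch of such a helper on Reader (an assumption, not the committed implementation), built only from methods that do appear in this commit:

def skip_until(self, header: str):
    # Skip whole child chunks until the next child's 4-byte tag matches
    # `header`. Leaving the with-block seeks past the skipped chunk, as in
    # Reader.__exit__ above.
    while self.could_have_child() and self.peak_next_header() != header:
        with self.read_child() as _skipped:
            pass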