diff --git a/src/blam.py b/src/blam.py index 8b32c0e..f0818bb 100644 --- a/src/blam.py +++ b/src/blam.py @@ -17,9 +17,9 @@ ''' import bpy import mathutils -import math, cmath +import math, cmath -bl_info = { \ +bl_info = { 'name': 'BLAM - The Blender camera calibration toolkit', 'author': 'Per Gantelius', 'version': (0, 0, 6), @@ -347,46 +347,46 @@ def length(vec): def dot(x, y): return sum([x[i] * y[i] for i in range(len(x))]) -def cbrt(x): - if x >= 0: - return math.pow(x, 1.0/3.0) - else: +def cbrt(x): + if x >= 0: + return math.pow(x, 1.0/3.0) + else: return -math.pow(abs(x), 1.0/3.0) - -def polar(x, y, deg=0): # radian if deg=0; degree if deg=1 - if deg: - return math.hypot(x, y), 180.0 * math.atan2(y, x) / math.pi - else: + +def polar(x, y, deg=0): # radian if deg=0; degree if deg=1 + if deg: + return math.hypot(x, y), 180.0 * math.atan2(y, x) / math.pi + else: return math.hypot(x, y), math.atan2(y, x) -def quadratic(a, b, c=None): - if c: # (ax^2 + bx + c = 0) - a, b = b / float(a), c / float(a) - t = a / 2.0 - r = t**2 - b - if r >= 0: # real roots - y1 = math.sqrt(r) - else: # complex roots - y1 = cmath.sqrt(r) - y2 = -y1 - return y1 - t, y2 - t - -def solveCubic(a, b, c, d): +def quadratic(a, b, c=None): + if c: # (ax^2 + bx + c = 0) + a, b = b / float(a), c / float(a) + t = a / 2.0 + r = t**2 - b + if r >= 0: # real roots + y1 = math.sqrt(r) + else: # complex roots + y1 = cmath.sqrt(r) + y2 = -y1 + return y1 - t, y2 - t + +def solveCubic(a, b, c, d): cIn = [a, b, c, d] - a, b, c = b / float(a), c / float(a), d / float(a) - t = a / 3.0 - p, q = b - 3 * t**2, c - b * t + 2 * t**3 - u, v = quadratic(q, -(p/3.0)**3) - if type(u) == type(0j): # complex cubic root - r, w = polar(u.real, u.imag) - y1 = 2 * cbrt(r) * math.cos(w / 3.0) - else: # real root - y1 = cbrt(u) + cbrt(v) - y2, y3 = quadratic(y1, p + y1**2) + a, b, c = b / float(a), c / float(a), d / float(a) + t = a / 3.0 + p, q = b - 3 * t**2, c - b * t + 2 * t**3 + u, v = quadratic(q, -(p/3.0)**3) + if 
type(u) == type(0j): # complex cubic root + r, w = polar(u.real, u.imag) + y1 = 2 * cbrt(r) * math.cos(w / 3.0) + else: # real root + y1 = cbrt(u) + cbrt(v) + y2, y3 = quadratic(y1, p + y1**2) x1 = y1 - t x2 = y2 - t x3 = y3 - t - + return x1, x2, x3 #helper function to determine if the current version @@ -394,30 +394,30 @@ def solveCubic(a, b, c, d): #or row major. ''' def arePythonMatricesRowMajor(): - - + + v = bpy.app.version is262OrGreater = v[0] >= 2 and v[1] >= 62 is260OrLess = v[0] <= 2 and v[1] <= 60 is261 = v[0] == 2 and v[1] == 61 rev = bpy.app.build_revision - + if is262OrGreater: return True - + if is260OrLess: return False - + #apparently, build_revision is not always just a number: #http://code.google.com/p/blam/issues/detail?id=11 - #TODO: find out what the format of bpy.app.build_revision is + #TODO: find out what the format of bpy.app.build_revision is #for now, remove anything that isn't a digit digits = [str(d) for d in range(9)] numberString = '' for ch in rev: if ch in digits: numberString = numberString + ch - + #do revision check if we're running 2.61 #matrices are row major starting in revision r42816 return int(numberString) >= 42816 @@ -440,7 +440,7 @@ class ProjectorCalibrationPanel(bpy.types.Panel): bl_label = "Video Projector Calibration" bl_space_type = "CLIP_EDITOR" bl_region_type = "TOOLS" - + def draw(self, context): scn = bpy.context.scene l = self.layout @@ -457,26 +457,26 @@ class CreateProjectorCalibrationWindowOperator(bpy.types.Operator): bl_idname = "object.create_proj_calib_win" bl_label = "Create calibration window" bl_description = "TODO" - + def execute(self, context): ws = bpy.context.window_manager.windows if len(ws) > 1: self.report({'ERROR'}, "Other windows exist. 
Close them and try again.") return{'CANCELLED'} - + return bpy.ops.screen.area_dupli('INVOKE_DEFAULT') class SetCalibrationWindowToClipEditor(bpy.types.Operator): bl_idname = "object.set_calib_window_to_clip" bl_label = "Clip editor" bl_description = "" - + def execute(self, context): windows = bpy.context.window_manager.windows if len(windows) > 2: self.report({'ERROR'}, "Expected two windows. Found " + str(len(windows))) return{'CANCELLED'} - + #operate on the window with one area window = None for w in windows: @@ -484,23 +484,23 @@ def execute(self, context): if len(areas) == 1: window = w break - + if not window: self.report({'ERROR'}, "Could not find single area window.") return{'CANCELLED'} - + area = window.screen.areas[0] - + toolsHidden = False propsHidden = False - + for i in area.regions: print(i.type, i.width, i.height) if i.type == 'TOOLS' and i.width <= 1: toolsHidden = True elif i.type == 'TOOL_PROPS' and i.width <= 1: propsHidden = True - + area.type = "CLIP_EDITOR" override = {'window': window, 'screen': window.screen, 'area': area} if not toolsHidden: @@ -509,20 +509,20 @@ def execute(self, context): bpy.ops.clip.properties(override) bpy.ops.clip.view_all(override) bpy.ops.clip.view_zoom_ratio(override, ratio=1) - + return{'FINISHED'} class SetCalibrationWindowToView3D(bpy.types.Operator): bl_idname = "object.set_calib_window_to_view3d" bl_label = "3D view" bl_description = "" - + def execute(self, context): windows = bpy.context.window_manager.windows if len(windows) > 2: self.report({'ERROR'}, "Expected two windows. 
Found " + str(len(windows))) return{'CANCELLED'} - + #operate on the window with one area window = None for w in windows: @@ -530,16 +530,16 @@ def execute(self, context): if len(areas) == 1: window = w break - + if not window: self.report({'ERROR'}, "Could not find single area window.") return{'CANCELLED'} - + area = window.screen.areas[0] - + toolsHidden = False propsHidden = False - + for i in area.regions: print(i.type, i.width, i.height) if i.type == 'TOOLS' and i.width <= 1: @@ -568,10 +568,10 @@ def execute(self, context): CAMERA CALIBRATION STUFF ''' -class PhotoModelingToolsPanel(bpy.types.Panel): - bl_label = "Photo Modeling Tools" - bl_space_type = "VIEW_3D" - bl_region_type = "TOOLS" +class PhotoModelingToolsPanel(bpy.types.Panel): + bl_label = "Photo Modeling Tools" + bl_space_type = "VIEW_3D" + bl_region_type = "TOOLS" def draw(self, context): scn = bpy.context.scene l = self.layout @@ -581,52 +581,52 @@ def draw(self, context): b.prop(scn, 'separate_faces') r = l.row() b = r.box() - + b.operator("object.project_bg_onto_mesh") b.prop(scn, 'projection_method') - #self.layout.operator("object.make_edge_x") + #self.layout.operator("object.make_edge_x") l.operator("object.set_los_scale_pivot") class SetLineOfSightScalePivot(bpy.types.Operator): - bl_idname = "object.set_los_scale_pivot" + bl_idname = "object.set_los_scale_pivot" bl_label = "Set line of sight scale pivot" bl_description = "Set the pivot to the camera origin, which makes scaling equivalent to translation along the line of sight." 
- + def execute(self, context): bpy.ops.object.mode_set(mode='OBJECT') - + selStates = [] objs = bpy.context.scene.objects for obj in objs: selStates.append(obj.select) obj.select = False - + #select the camera bpy.context.scene.camera.select = True - + #snap the cursor to the camer bpy.ops.view3d.snap_cursor_to_selected() - + #set the cursor to be the pivot space = bpy.context.area.spaces.active print(space.pivot_point) space.pivot_point = 'CURSOR' - + for i in range(len(objs)): obj = objs[i] obj.select = selStates[i] - + return{'FINISHED'} - + class ProjectBackgroundImageOntoMeshOperator(bpy.types.Operator): - bl_idname = "object.project_bg_onto_mesh" + bl_idname = "object.project_bg_onto_mesh" bl_label = "Project background image onto mesh" - bl_description = "Projects the current 3D view background image onto a mesh (the active object) from the active camera." - + bl_description = "Projects the current 3D view background image onto a mesh (the active object) from the active camera." + projectorName = 'tex_projector' materialName = 'cam_map_material' - + def meshVerticesToNDC(self, cam, mesh): #compute a projection matrix transforming @@ -647,7 +647,7 @@ def meshVerticesToNDC(self, cam, mesh): aspect = ry / float(rx) w = math.tan(0.5 * fov) h = aspect * w - + pm = mathutils.Matrix() pm[0][0] = 1 / w pm[1][1] = 1 / h @@ -655,15 +655,15 @@ def meshVerticesToNDC(self, cam, mesh): pm[2][3] = 2 * near * far / (near - far) pm[3][2] = -1.0 pm[3][3] = 0.0 - + #if not arePythonMatricesRowMajor(): # pm.transpose() - + returnVerts = [] for v in mesh.data.vertices: #the vert in local coordinates - vec = v.co.to_4d() + vec = v.co.to_4d() #the vert in world coordinates vec = mesh.matrix_world * vec #the vert in clip coordinates @@ -672,67 +672,67 @@ def meshVerticesToNDC(self, cam, mesh): w = vec[3] vec = [x / w for x in vec] returnVerts.append((vec[0], vec[1], vec[2])) - + return returnVerts - + def addUVsProjectedFromView(self, mesh): cam = bpy.context.scene.camera - + #get 
the mesh vertices in normalized device coordinates #as seen through the active camera ndcVerts = self.meshVerticesToNDC(cam, mesh) - + #create a uv layer bpy.ops.object.mode_set(mode='EDIT') #projecting from view here, but the current view might not - #be the camera, so the uvs are computed manually a couple + #be the camera, so the uvs are computed manually a couple #of lines down bpy.ops.uv.project_from_view(scale_to_bounds=True) bpy.ops.object.mode_set(mode='OBJECT') - - #set uvs to match the vertex x and y components in NDC + + #set uvs to match the vertex x and y components in NDC isBmesh = True try: f = mesh.data.faces isBmesh = False except: pass - + if isBmesh: loops = mesh.data.loops uvLoops = mesh.data.uv_layers[0].data - for loop, uvLoop in zip(loops, uvLoops): + for loop, uvLoop in zip(loops, uvLoops): vIdx = loop.vertex_index - print("loop", loop, "vertex", loop.vertex_index, "uvLoop", uvLoop) + print("loop", loop, "vertex", loop.vertex_index, "uvLoop", uvLoop) ndcVert = ndcVerts[vIdx] uvLoop.uv[0] = 0.5 * (ndcVert[0] + 1.0) uvLoop.uv[1] = 0.5 * (ndcVert[1] + 1.0) - + else: assert(len(getMeshFaces(mesh)) == len(mesh.data.uv_textures[0].data)) for meshFace, uvFace in zip(getMeshFaces(mesh), mesh.data.uv_textures[0].data): faceVerts = [ndcVerts[i] for i in meshFace.vertices] - + for i in range(len(uvFace.uv)): uvFace.uv[i][0] = 0.5 * (faceVerts[i][0] + 1.0) uvFace.uv[i][1] = 0.5 * (faceVerts[i][1] + 1.0) - + def performSimpleProjection(self, camera, mesh, img): if len(mesh.material_slots) == 0: mat = bpy.data.materials.new(self.materialName) mesh.data.materials.append(mat) else: - mat = mesh.material_slots[0].material + mat = mesh.material_slots[0].material mat.use_shadeless = True mat.use_face_texture = True - - - + + + self.addUVsProjectedFromView(mesh) - + for f in mesh.data.uv_textures[0].data: f.image = img - + def performHighQualityProjection(self, camera, mesh, img): if len(mesh.material_slots) == 0: mat = bpy.data.materials.new(self.materialName) 
@@ -740,30 +740,30 @@ def performHighQualityProjection(self, camera, mesh, img): else: mat = mesh.material_slots[0].material mat.use_shadeless = True - mat.use_face_texture = True - - - + mat.use_face_texture = True + + + #the texture sampling is not perspective correct #when directly using sticky UVs or UVs projected from the view #this is a pretty messy workaround that gives better looking results self.addUVsProjectedFromView(mesh) - + #then create an empty object that will serve as a texture projector - #if the mesh has a child with the name of a texture projector, + #if the mesh has a child with the name of a texture projector, #reuse it reusedProjector = None for ch in mesh.children: if self.projectorName in ch.name: reusedProjector = ch break - + if reusedProjector: projector = reusedProjector else: bpy.ops.object.camera_add() projector = bpy.context.active_object - + bpy.context.scene.objects.active = projector projector.name = mesh.name + '_' + self.projectorName projector.matrix_world = camera.matrix_world @@ -773,32 +773,32 @@ def performHighQualityProjection(self, camera, mesh, img): projector.data.sensor_width = camera.data.sensor_width projector.data.sensor_height = camera.data.sensor_height projector.data.sensor_fit = camera.data.sensor_fit - + #parent the projector to the mesh for convenience for obj in bpy.context.scene.objects: obj.select = False - + projector.select = True bpy.context.scene.objects.active = mesh #bpy.ops.object.parent_set() - + #lock the projector to the mesh #bpy.context.scene.objects.active = projector #bpy.ops.object.constraint_add(type='COPY_LOCATION') #projector.constraints[-1].target = mesh - - #create a simple subdivision modifier on the mesh object. + + #create a simple subdivision modifier on the mesh object. #this subdivision is what alleviates the texture sampling #artefacts. 
bpy.context.scene.objects.active = mesh levels = 3 bpy.ops.object.modifier_add() - + modifier = mesh.modifiers[-1] modifier.subdivision_type = 'SIMPLE' modifier.levels = levels modifier.render_levels = levels - + #then create a uv project modifier that will project the #image onto the subdivided mesh using our projector object. bpy.ops.object.modifier_add(type='UV_PROJECT') @@ -808,19 +808,19 @@ def performHighQualityProjection(self, camera, mesh, img): modifier.use_image_override = True modifier.projectors[0].object = projector modifier.uv_layer = mesh.data.uv_textures[0].name - + bpy.ops.object.mode_set(mode='EDIT') bpy.ops.object.mode_set(mode='OBJECT') - + def prepareMesh(self, mesh): #remove all uv layers while len(mesh.data.uv_textures) > 0: bpy.ops.mesh.uv_texture_remove() - + #remove all modifiers for m in mesh.modifiers: bpy.ops.object.modifier_remove(modifier=m.name) - + def execute(self, context): ''' Get the active object and make sure it is a mesh @@ -833,22 +833,22 @@ def execute(self, context): elif not 'Mesh' in str(type(mesh.data)): self.report({'ERROR'}, "The active object is not a mesh") return{'CANCELLED'} - + ''' Get the current camera ''' camera = bpy.context.scene.camera if not camera: self.report({'ERROR'}, "No active camera.") - return{'CANCELLED'} - - + return{'CANCELLED'} + + activeSpace = bpy.context.area.spaces.active - + if len(activeSpace.background_images) == 0: self.report({'ERROR'}, "No backround images of clips found.") return{'CANCELLED'} - + #check what kind of background we're dealing with bg = activeSpace.background_images[0] if bg.image != None: @@ -860,12 +860,12 @@ def execute(self, context): img = bpy.data.images.load(path) except: self.report({'ERROR'}, "Cannot load image %s" % path) - return{'CANCELLED'} + return{'CANCELLED'} else: #shouldnt end up here self.report({'ERROR'}, "Both background clip and image are None") return{'CANCELLED'} - + #if we made it here, we have a camera, a mesh and an image. 
self.prepareMesh(mesh) method = bpy.context.scene.projection_method @@ -876,51 +876,51 @@ def execute(self, context): else: self.report({'ERROR'}, "Unknown projection method") return{'CANCELLED'} - + activeSpace.viewport_shade = 'TEXTURED' - + return{'FINISHED'} class Reconstruct3DMeshOperator(bpy.types.Operator): - bl_idname = "object.compute_depth_information" + bl_idname = "object.compute_depth_information" bl_label = "Reconstruct 3D geometry" - bl_description = "Reconstructs a 3D mesh with rectangular faces based on a mesh with faces lining up with the corresponding faces in the image. Relies on the active camera being properly calibrated." - + bl_description = "Reconstructs a 3D mesh with rectangular faces based on a mesh with faces lining up with the corresponding faces in the image. Relies on the active camera being properly calibrated." + def evalEq17(self, origin, p1, p2): a = [x - y for x, y in zip(origin, p1)] b = [x - y for x, y in zip(origin, p2)] return dot(a, b) - + def evalEq27(self, l): return self.C4 * l ** 4 + self.C3 * l ** 3 + self.C2 * l ** 2 + self.C1 * l + self.C0 def evalEq28(self, l): return self.B4 * l ** 4 + self.B3 * l ** 3 + self.B2 * l ** 2 + self.B1 * l + self.B0 - + def evalEq29(self, l): return self.D3 * l ** 3 + self.D2 * l ** 2 + self.D1 * l + self.D0 - + def worldToCameraSpace(self, verts): - + ret = [] for v in verts: #the vert in local coordinates - vec = v.co.to_4d() + vec = v.co.to_4d() #the vert in world coordinates vec = self.mesh.matrix_world * vec #the vert in camera coordinates - vec = self.camera.matrix_world.inverted() * vec - + vec = self.camera.matrix_world.inverted() * vec + ret.append(vec[0:3]) return ret - - def computeCi(self, Qab, Qac, Qad, Qbc, Qbd, Qcd): + + def computeCi(self, Qab, Qac, Qad, Qbc, Qbd, Qcd): self.C4 = Qad * Qbc * Qbd - Qac * Qbd ** 2 self.C3 = Qab * Qad * Qbd * Qcd - Qad ** 2 * Qcd + Qac * Qad * Qbd ** 2 - Qad ** 2 * Qbc * Qbd - Qbc * Qbd + 2 * Qab * Qac * Qbd - Qab * Qad * Qbc self.C2 = -Qab 
* Qbd * Qcd - Qab ** 2 * Qad * Qcd + 2 * Qad * Qcd + Qad * Qbc * Qbd - 3 * Qab * Qac * Qad * Qbd + Qab * Qad ** 2 * Qbc + Qab * Qbc + Qac * Qad ** 2 - Qab ** 2 * Qac self.C1 = Qab ** 2 * Qcd - Qcd + Qab * Qac * Qbd - Qab * Qad * Qbc + 2 * Qab ** 2 * Qac * Qad - 2 * Qac * Qad self.C0 = Qac - Qab ** 2 * Qac - + def computeBi(self, Qab, Qac, Qad, Qbc, Qbd, Qcd): self.B4 = Qbd - Qbd * Qcd ** 2 self.B3 = 2 * Qad * Qbd * Qcd ** 2 + Qab * Qcd ** 2 + Qac * Qbd * Qcd - Qad * Qbc * Qcd - 2 * Qad * Qbd - Qab @@ -930,38 +930,38 @@ def computeBi(self, Qab, Qac, Qad, Qbc, Qbd, Qcd): def computeQuadDepthInformation(self, qHatA, qHatB, qHatC, qHatD): - #print() + #print() #print("computeQuadDepthInformation") - + ''' compute the coefficients Qij ''' Qab = dot(qHatA, qHatB) Qac = dot(qHatA, qHatC) Qad = dot(qHatA, qHatD) - + Qba = dot(qHatB, qHatA) Qbc = dot(qHatB, qHatC) Qbd = dot(qHatB, qHatD) - + Qca = dot(qHatC, qHatA) Qcb = dot(qHatC, qHatB) Qcd = dot(qHatC, qHatD) - + #print("Qab", Qab, "Qac", Qac, "Qad", Qad) #print("Qba", Qba, "Qbc", Qbc, "Qbd", Qbd) #print("Qca", Qca, "Qcb", Qcb, "Qcd", Qcd) - + ''' compute the coefficients Ci of equation (27) ''' self.computeCi(Qab, Qac, Qad, Qbc, Qbd, Qcd) - + ''' compute the coefficients Bi of equation (28) ''' self.computeBi(Qab, Qac, Qad, Qbc, Qbd, Qcd) - + ''' compute the cofficients Di of equation (29) ''' @@ -976,32 +976,32 @@ def computeQuadDepthInformation(self, qHatA, qHatB, qHatC, qHatD): ''' roots = solveCubic(self.D3, self.D2, self.D1, self.D0) #print("Eq 29 Roots", roots) - + ''' choose one of the three computed roots. Tan, Sullivan and Baker propose choosing a real root that satisfies "(27) or (28)". 
Since these - equations are derived from the orthogonality equations (17) and - since we're interested in a quad with edges that are "as + equations are derived from the orthogonality equations (17) and + since we're interested in a quad with edges that are "as orthogonal as possible", in this implementation the positive real root that minimizes the quad orthogonality error is chosen instead. ''' - + chosenRoot = None minError = None - + #print() #print("Finding root") for root in roots: - + #print("Root", root) - + if type(root) == type(0j): #complex root. do nothing continue elif root <= 0: #non-positive root. do nothing continue - + #compute depth values lambdaA-D based on the current root lambdaD = root self.lambdaA = 1 #arbitrarily set to 1 @@ -1012,29 +1012,29 @@ def computeQuadDepthInformation(self, qHatA, qHatB, qHatC, qHatD): denLambdaC = (Qac - Qcd * lambdaD) self.lambdaC = numLambdaC / denLambdaC self.lambdaD = lambdaD - + #print("lambdaA", numLambdaA, "/", denLambdaA) #print("lambdaC", numLambdaC, "/", denLambdaC) - + #compute vertex positions pA = [x * self.lambdaA for x in qHatA] pB = [x * self.lambdaB for x in qHatB] pC = [x * self.lambdaC for x in qHatC] pD = [x * self.lambdaD for x in qHatD] - + #compute the mean orthogonality error for the resulting quad meanError, maxError = self.getQuadError(pA, pB, pC, pD) - + if minError == None or meanError < minError: minError = meanError chosenRoot = root - + if chosenRoot == None: self.report({'ERROR'}, "No appropriate root found.") return #TODO cancel properly - + #print("Chosen root", chosenRoot) - + ''' compute and return the final vertex positions from equation (16) ''' @@ -1043,39 +1043,39 @@ def computeQuadDepthInformation(self, qHatA, qHatB, qHatC, qHatD): self.lambdaB = (Qad * lambdaD - 1.0) / (Qbd * lambdaD - Qab) self.lambdaC = (Qad * lambdaD - lambdaD * lambdaD) / (Qac - Qcd * lambdaD) self.lambdaD = lambdaD - + pA = [x * self.lambdaA for x in qHatA] pB = [x * self.lambdaB for x in qHatB] pC = 
[x * self.lambdaC for x in qHatC] pD = [x * self.lambdaD for x in qHatD] - + meanError, maxError = self.getQuadError(pA, pB, pC, pD) #self.report({'INFO'}, "Error: " + str(meanError) + " (" + str(maxError) + ")") - + return [pA, pB, pC, pD] - + def getQuadError(self, pA, pB, pC, pD): orthABD = self.evalEq17(pA, pB, pD) orthABC = self.evalEq17(pB, pA, pC) orthBCD = self.evalEq17(pC, pB, pD) orthACD = self.evalEq17(pD, pA, pC) - + absErrors = [abs(orthABD), abs(orthABC), abs(orthBCD), abs(orthACD)] maxError = max(absErrors) - meanError = 0.25 * sum(absErrors) + meanError = 0.25 * sum(absErrors) #print("absErrors", absErrors, "meanError", meanError) - + return meanError, maxError - + def createMesh(self, inputMesh, computedCoordsByFace, quads): ''' Mesh creation is done in two steps: - 1. adjust the computed depth values so that the + 1. adjust the computed depth values so that the quad vertices line up as well as possible 2. optionally merge each set of computed quad vertices that correspond to a single vertex in the input mesh ''' - + ''' Step 1 least squares minimize the depth difference @@ -1083,7 +1083,7 @@ def createMesh(self, inputMesh, computedCoordsByFace, quads): computing depth factors for each quad. ''' quadFacePairsBySharedEdge = {} - + def indexOfFace(fcs, face): i = 0 for f in fcs: @@ -1097,17 +1097,17 @@ def indexOfFace(fcs, face): quadFaces = [] for e in inputMesh.data.edges: ev = e.vertices - + #gather all faces containing the current edge facesContainingEdge = [] - + for f in getMeshFaces(inputMesh): matchFound = False fv = f.vertices if len(fv) != 4: #ignore non-quad faces continue - + if f not in quadFaces: quadFaces.append(f) @@ -1115,16 +1115,16 @@ def indexOfFace(fcs, face): #print("fv", fv, "ev", ev, "len(set(fv) & set(ev))", len(set(fv) & set(ev))) if len(set(fv) & set(ev)) == 2: matchFound = True - + if matchFound: assert(f not in facesContainingEdge) facesContainingEdge.append(f) - + #sanity check. 
an edge can be shared by at most two faces. assert(len(facesContainingEdge) <= 2 and len(facesContainingEdge) >= 0) - + edgeIsShared = (len(facesContainingEdge) == 2) - + if edgeIsShared: quadFacePairsBySharedEdge[e] = facesContainingEdge else: @@ -1132,11 +1132,11 @@ def indexOfFace(fcs, face): numSharedEdges = len(quadFacePairsBySharedEdge.keys()) numQuadFaces = len(quadFaces) #sanity check: the shared and unshared edges are disjoint and should add up to the total number of edges - assert(unsharedEdgeCount + numSharedEdges == len(inputMesh.data.edges)) + assert(unsharedEdgeCount + numSharedEdges == len(inputMesh.data.edges)) #print("num shared edges", numSharedEdges) #print(quadFacePairsBySharedEdge) #assert(False) - + #each shared edge gives rise to one equation per vertex, #so the number of rows in the matrix is 2n, where n is #the number of shared edges. the number of columns is m-1 @@ -1151,24 +1151,24 @@ def indexOfFace(fcs, face): vertsToMergeByOriginalIdx = {} for e in quadFacePairsBySharedEdge.keys(): pair = quadFacePairsBySharedEdge[e] - + #the two original mesh faces sharing the current edge f0 = pair[0] f1 = pair[1] - + assert(f0 in faces) assert(f1 in faces) assert(len(f0.vertices) == 4) assert(len(f1.vertices) == 4) - + #the two computed quads corresponding to the original mesh faces c0 = computedCoordsByFace[f0] c1 = computedCoordsByFace[f1] - + #the indices into the output mesh of the two faces sharing the current edge f0Idx = quads.index(c0)#indexOfFace(faces, f0) f1Idx = quads.index(c1)#indexOfFace(faces, f1) - + def getQuadVertWithMeshIdx(quad, idx): #print("idx", idx, "quad", quad) i = 0 @@ -1177,31 +1177,31 @@ def getQuadVertWithMeshIdx(quad, idx): return p, i i = i + 1 assert(False) #shouldnt end up here - + #vij is vertex j of the current edge in face i #idxij is the index of vertex j in quad i (0-3) v00, idx00 = getQuadVertWithMeshIdx(c0, e.vertices[0]) v01, idx01 = getQuadVertWithMeshIdx(c0, e.vertices[1]) - + v10, idx10 = 
getQuadVertWithMeshIdx(c1, e.vertices[0]) v11, idx11 = getQuadVertWithMeshIdx(c1, e.vertices[1]) - + #vert 0 depths lambda00 = v00[2] lambda10 = v10[2] - + #vert 1 depths lambda01 = v01[2] lambda11 = v11[2] #print(faces, f0, f1) - + assert(f0Idx >= 0 and f0Idx < numFaces) assert(f1Idx >= 0 and f1Idx < numFaces) - + #vert 0 vert0MatrixRow = [0] * (numQuadFaces - 1) vert0RhRow = [0] - + if f0Idx == 0: vert0RhRow[0] = lambda00 vert0MatrixRow[f1Idx - 1] = lambda10 @@ -1211,11 +1211,11 @@ def getQuadVertWithMeshIdx(quad, idx): else: vert0MatrixRow[f0Idx - 1] = lambda00 vert0MatrixRow[f1Idx - 1] = -lambda10 - + #vert 1 vert1MatrixRow = [0] * (numQuadFaces - 1) vert1RhRow = [0] - + if f0Idx == 0: vert1RhRow[0] = lambda01 vert1MatrixRow[f1Idx - 1] = lambda11 @@ -1225,32 +1225,32 @@ def getQuadVertWithMeshIdx(quad, idx): else: vert1MatrixRow[f0Idx - 1] = lambda01 vert1MatrixRow[f1Idx - 1] = -lambda11 - + matrixRows.append(vert0MatrixRow) matrixRows.append(vert1MatrixRow) - + rhRows.append(vert0RhRow) rhRows.append(vert1RhRow) - + #store index information for vertex merging in the new mesh if e.vertices[0] not in vertsToMergeByOriginalIdx.keys(): vertsToMergeByOriginalIdx[e.vertices[0]] = [] if e.vertices[1] not in vertsToMergeByOriginalIdx.keys(): vertsToMergeByOriginalIdx[e.vertices[1]] = [] - + l0 = vertsToMergeByOriginalIdx[e.vertices[0]] l1 = vertsToMergeByOriginalIdx[e.vertices[1]] - + if idx00 + 4 * f0Idx not in l0: l0.append(idx00 + 4 * f0Idx) if idx10 + 4 * f1Idx not in l0: l0.append(idx10 + 4 * f1Idx) - + if idx01 + 4 * f0Idx not in l1: l1.append(idx01 + 4 * f0Idx) if idx11 + 4 * f1Idx not in l1: l1.append(idx11 + 4 * f1Idx) - + assert(len(matrixRows) == len(rhRows)) assert(len(matrixRows) == 2 * len(quadFacePairsBySharedEdge)) #solve for the depth factors 2,3...m @@ -1259,20 +1259,20 @@ def getQuadVertWithMeshIdx(quad, idx): #print("rhRows") #print(rhRows) - + #sanity check: the sets of vertex indices to merge should be disjoint for vs in 
vertsToMergeByOriginalIdx.values(): - + for idx in vs: assert(idx >= 0 and idx < numFaces * 4) - + for vsRef in vertsToMergeByOriginalIdx.values(): if vs != vsRef: #check that the current sets are disjoint s1 = set(vs) s2 = set(vsRef) assert(len(s1 & s2) == 0) - + if numQuadFaces > 2: m = Mat(matrixRows) b = Mat(rhRows) @@ -1286,9 +1286,9 @@ def getQuadVertWithMeshIdx(quad, idx): factors = [1, f] elif numQuadFaces == 1: factors = [1] - - - + + + #print("factors", factors) #multiply depths by the factors computed per face depth factors #quads = [] @@ -1303,16 +1303,16 @@ def getQuadVertWithMeshIdx(quad, idx): #print("vert after", vert) quad[i] = vert #quads.append(quad) - + #create the actual blender mesh bpy.ops.object.mode_set(mode='OBJECT') name = inputMesh.name + '_3D' #print("createM esh", points) - me = bpy.data.meshes.new(name) - ob = bpy.data.objects.new(name, me) - ob.show_name = True - # Link object to scene - bpy.context.scene.objects.link(ob) + me = bpy.data.meshes.new(name) + ob = bpy.data.objects.new(name, me) + ob.show_name = True + # Link object to scene + bpy.context.scene.objects.link(ob) verts = [] faces = [] idx = 0 @@ -1341,7 +1341,7 @@ def getQuadVertWithMeshIdx(quad, idx): print("merging verts", vs) #merge at the mean position, which is guaranteed to #lie on the line of sight, since all the vertices do - + mean = [0, 0, 0] for idx in vs: #print("idx", idx) @@ -1350,12 +1350,12 @@ def getQuadVertWithMeshIdx(quad, idx): for i in range(3): mean[i] = mean[i] + currVert[i] / float(len(vs)) - + for idx in vs: verts[idx] = mean print("2") - # Update mesh with new data - me.from_pydata(verts, [], faces) + # Update mesh with new data + me.from_pydata(verts, [], faces) me.update(calc_edges=True) ob.select = True bpy.context.scene.objects.active = ob @@ -1364,9 +1364,9 @@ def getQuadVertWithMeshIdx(quad, idx): bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.remove_doubles() bpy.ops.object.mode_set(mode='OBJECT') - + return ob - + def 
getOutputMeshScale(self, camera, inMesh, outMesh): inMeanPos = [0.0] * 3 cmi = camera.matrix_world.inverted() @@ -1376,7 +1376,7 @@ def getOutputMeshScale(self, camera, inMesh, outMesh): vCamSpace = cmi * mm * v.co.to_4d() for i in range(3): inMeanPos[i] = inMeanPos[i] + vCamSpace[i] / float(len(inMesh.data.vertices)) - + outMeanPos = [0.0] * 3 for v in outMesh.data.vertices: for i in range(3): @@ -1384,18 +1384,18 @@ def getOutputMeshScale(self, camera, inMesh, outMesh): inDistance = math.sqrt(sum([x * x for x in inMeanPos])) outDistance = math.sqrt(sum([x * x for x in outMeanPos])) - - print(inMeanPos, outMeanPos, inDistance, outDistance) + + print(inMeanPos, outMeanPos, inDistance, outDistance) if outDistance == 0.0: #if we need to handle this case, we probably have bigger problems... #anyway, return 1. return 1 - + return inDistance / outDistance - + def areAllMeshFacesConnected(self, mesh): - + def getFaceNeighbors(faces, face): neighbors = [] for v in face.vertices: @@ -1407,15 +1407,15 @@ def getFaceNeighbors(faces, face): if otherFace not in neighbors: neighbors.append(otherFace) return neighbors - + def visitNeighbors(faces, face, visitedFaces): ns = getFaceNeighbors(faces, face) for n in ns: if n not in visitedFaces: visitedFaces.append(n) visitNeighbors(faces, n, visitedFaces) - - + + faces = getMeshFaces(mesh) if len(faces) == 1: return True @@ -1423,24 +1423,24 @@ def visitNeighbors(faces, face, visitedFaces): visitNeighbors(faces, faces[0], visitedFaces) return len(visitedFaces) == len(faces) - + def areAllMeshFacesQuads(self, mesh): for f in getMeshFaces(mesh): if len(f.vertices) != 4: return False return True - + def execute(self, context): - + scn = bpy.context.scene - + ''' get the active camera ''' self.camera = bpy.context.scene.camera if self.camera == None: self.report({'ERROR'}, "There is no active camera") - + ''' get the mesh containing the quads, assume it's the active object ''' @@ -1454,14 +1454,14 @@ def execute(self, context): 
return{'CANCELLED'} if len(getMeshFaces(self.mesh)) == 0: self.report({'ERROR'}, "The mesh does not have any faces.") - return{'CANCELLED'} + return{'CANCELLED'} if not self.areAllMeshFacesQuads(self.mesh): self.report({'ERROR'}, "The mesh must consist of quad faces only.") - return{'CANCELLED'} + return{'CANCELLED'} if not self.areAllMeshFacesConnected(self.mesh): self.report({'ERROR'}, "All faces of the input mesh must be connected.") return{'CANCELLED'} - + ''' process all quads from the mesh individually, computing vertex depth values for each of them. @@ -1475,67 +1475,67 @@ def execute(self, context): #if this is a quad face, process it if len(f.vertices) == 4: assert(f not in computedCoordsByFace.keys()) - + #gather quad vertices in the local coordinate frame of the #mesh inputPointsLocalMeshSpace = [] for idx in f.vertices: inputPointsLocalMeshSpace.append(self.mesh.data.vertices[idx]) - + #transform vertices to camera space inputPointsCameraSpace = self.worldToCameraSpace(inputPointsLocalMeshSpace) - + #compute normalized input vectors (eq 16) qHats = [normalize(x) for x in inputPointsCameraSpace] #run the algorithm to create a quad with depth. coords in camera space outputPointsCameraSpace = self.computeQuadDepthInformation(*qHats) - + #store the index in the original mesh of the computed quad verts. #used later when constructing the output mesh. #print("quad") for i in range(4): outputPointsCameraSpace[i] = list(outputPointsCameraSpace[i][:]) outputPointsCameraSpace[i].append(f.vertices[i]) - - #remember which original mesh face corresponds to the computed quad + + #remember which original mesh face corresponds to the computed quad computedCoordsByFace[f] = outputPointsCameraSpace quads.append(outputPointsCameraSpace) else: assert(False) #no non-quads allowed. should have been caught earlier - + m = self.createMesh(self.mesh, computedCoordsByFace, quads) - + #up intil now, coords have been in camera space. 
transform the final mesh so #its transform (and thus origin) conicides with the camera. m.matrix_world = bpy.context.scene.camera.matrix_world - + #finally apply a uniform scale that matches the distance between #the camera and mean point of the two meshes uniformScale = self.getOutputMeshScale(self.camera, self.mesh, m) m.scale = [uniformScale] * 3 - + return{'FINISHED'} class CameraCalibrationPanel(bpy.types.Panel): ''' The GUI for the focal length and camera orientation functionality. ''' - bl_label = "Static Camera Calibration" - bl_space_type = "CLIP_EDITOR" - bl_region_type = "TOOLS" - - def draw(self, context): - + bl_label = "Static Camera Calibration" + bl_space_type = "CLIP_EDITOR" + bl_region_type = "TOOLS" + + def draw(self, context): + l = self.layout scn = context.scene l.prop(scn, 'calibration_type') - row = l.row() + row = l.row() box = row.box() box.label("Line set 1 (first grease pencil layer)") box.prop(scn, 'vp1_axis') - - row = l.row() + + row = l.row() box = row.box() singleVp = scn.calibration_type == 'one_vp' if singleVp: @@ -1545,7 +1545,7 @@ def draw(self, context): else: box.label("Line set 2 (second grease pencil layer)") box.prop(scn, 'vp2_axis') - row = l.row() + row = l.row() #row.enabled = singleVp if singleVp: row.prop(scn, 'up_axis') @@ -1554,24 +1554,24 @@ def draw(self, context): #TODO l.prop(scn, 'vp1_only') l.prop(scn, 'set_cambg') - l.operator("object.estimate_focal_length") + l.operator("object.estimate_focal_length") class CameraCalibrationOperator(bpy.types.Operator): '''\brief This operator handles estimation of focal length and camera orientation from input line segments. All sections numbers, equations numbers etc - refer to "Using Vanishing Points for Camera Calibration and Coarse 3D Reconstruction + refer to "Using Vanishing Points for Camera Calibration and Coarse 3D Reconstruction from a Single Image" by E. Guillou, D. Meneveaux, E. Maisel, K. Bouatouch. (http://www.irisa.fr/prive/kadi/Reconstruction/paper.ps.gz). 
#Blender operator identification (attributes of CameraCalibrationOperator)
bl_idname = "object.estimate_focal_length"
bl_label = "Calibrate active camera"
bl_description = "Computes the focal length and orientation of the active camera based on the provided grease pencil strokes."

def computeSecondVanishingPoint(self, Fu, f, P, horizonDir):
    '''Computes the coordinates of the second vanishing point
    based on the first, a focal length, the center of projection and
    the desired horizon tilt angle. The equations here are derived from
    section 3.2 "Determining the focal length from a single image".
    param Fu the first vanishing point in normalized image coordinates.
    param f the relative focal length.
    param P the center of projection (currently unused, see TODO 1).
    param horizonDir The desired horizon direction.
    return The coordinates of the second vanishing point.
    '''
    #TODO 1: take principal point into account here
    #TODO 2: if the first vanishing point coincides with the image center,
    #        the denominator below is zero; this degenerate case should be
    #        handled somehow.
    #k scales horizonDir so that the two back-projected directions become
    #orthogonal
    denominator = Fu[0] * horizonDir[0] + Fu[1] * horizonDir[1]
    k = -(Fu[0] ** 2 + Fu[1] ** 2 + f ** 2) / denominator
    return [Fu[axis] + k * horizonDir[axis] for axis in range(2)]
def computeFocalLength(self, Fu, Fv, P):
    '''Computes the relative focal length from two vanishing points of
    orthogonal directions, as in section 3.2 "Determining the focal
    length from a single image".
    \param Fu the first vanishing point in normalized image coordinates.
    \param Fv the second vanishing point in normalized image coordinates.
    \param P the center of projection in normalized image coordinates.
    \return The relative focal length, or None if the vanishing point
            constellation admits no real focal length.
    '''
    def vsub(a, b):
        #component-wise a - b
        return [x - y for x, y in zip(a, b)]

    #Puv is the orthogonal projection of P onto the line through Fu and Fv
    axisDir = normalize(vsub(Fu, Fv))
    t = dot(axisDir, vsub(P, Fv))
    Puv = [t * d + v for d, v in zip(axisDir, Fv)]

    #distances between P, the two vanishing points and the projection Puv
    PPuv = length(vsub(P, Puv))
    FvPuv = length(vsub(Fv, Puv))
    FuPuv = length(vsub(Fu, Puv))

    #f^2 = |FvPuv| * |FuPuv| - |PPuv|^2; a negative value means the two
    #vanishing points cannot correspond to orthogonal directions for any
    #real focal length
    fSquared = FvPuv * FuPuv - PPuv * PPuv
    if fSquared < 0:
        return None
    return math.sqrt(fSquared)
def computeCameraRotationMatrix(self, Fu, Fv, f, P):
    '''Computes the camera rotation matrix based on two vanishing points
    and a focal length as in section 3.3 "Computing the rotation matrix".
    \param Fu the first vanishing point in normalized image coordinates.
    \param Fv the second vanishing point in normalized image coordinates.
    \param f the relative focal length.
    \param P the principal point.
    \return A mathutils.Matrix holding the camera rotation.
    '''
    #fix: work on local copies. the previous implementation subtracted P
    #from Fu and Fv in place, silently mutating the caller's lists.
    Fu = [Fu[0] - P[0], Fu[1] - P[1]]
    Fv = [Fv[0] - P[0], Fv[1] - P[1]]

    #OFu and OFv are the directions from the center of projection O to
    #the two vanishing points
    OFu = [Fu[0], Fu[1], f]
    OFv = [Fv[0], Fv[1], f]

    s1 = length(OFu)
    upRc = normalize(OFu)

    s2 = length(OFv)
    vpRc = normalize(OFv)

    #wpRc = upRc x vpRc completes the orthogonal camera basis
    wpRc = [upRc[1] * vpRc[2] - upRc[2] * vpRc[1],
            upRc[2] * vpRc[0] - upRc[0] * vpRc[2],
            upRc[0] * vpRc[1] - upRc[1] * vpRc[0]]

    #the three basis vectors are written into the matrix columns
    M = mathutils.Matrix()
    M[0][0] = Fu[0] / s1
    M[0][1] = Fv[0] / s2
    M[0][2] = wpRc[0]

    M[1][0] = Fu[1] / s1
    M[1][1] = Fv[1] / s2
    M[1][2] = wpRc[1]

    M[2][0] = f / s1
    M[2][1] = f / s2
    M[2][2] = wpRc[2]

    #NOTE(review): the original code transposed unconditionally (its old
    #comment referenced arePythonMatricesRowMajor()) -- confirm this still
    #matches the mathutils conventions of the target Blender version
    M.transpose()

    return M
def computeIntersectionPointForLineSegments(self, lineSet):
    '''Computes the intersection point in a least squares sense of
    a collection of line segments. Each segment contributes one linear
    constraint n . x = n . p, where p is a point on the segment and n a
    unit normal of the segment.
    '''
    coefficientRows = []
    rhsRows = []

    for segment in lineSet:
        #a point on the line
        p = segment[0]
        #a unit vector parallel to the line
        direction = normalize([b - a for b, a in zip(segment[1], segment[0])])
        #a unit vector perpendicular to the line
        n = [direction[1], -direction[0]]
        coefficientRows.append([n[0], n[1]])
        rhsRows.append([p[0] * n[0] + p[1] * n[1]])

    #solve the (generally overdetermined) system in a least squares sense
    solution = Mat(coefficientRows).solve(Mat(rhsRows))
    return [row[0] for row in solution]

def computeTriangleOrthocenter(self, verts):
    '''Returns the orthocenter (x, y) of the triangle spanned by the
    three 2D points in verts. Used to recover the principal point from
    three vanishing points.
    '''
    assert(len(verts) == 3)

    #closed-form orthocenter of the triangle (a, b), (c, d), (e, f)
    a, b = verts[0][0], verts[0][1]
    c, d = verts[1][0], verts[1][1]
    e, f = verts[2][0], verts[2][1]

    N = b * c + d * e + f * a - c * f - b * e - a * d
    x = ((d - f) * b * b + (f - b) * d * d + (b - d) * f * f + a * b * (c - e) + c * d * (e - a) + e * f * (a - c)) / N
    y = ((e - c) * a * a + (a - e) * c * c + (c - a) * e * e + a * b * (f - d) + c * d * (b - f) + e * f * (d - b)) / N

    return (x, y)

def relImgCoords2ImgPlaneCoords(self, pt, imageWidth, imageHeight):
    '''Maps relative image coordinates (in [0, 1] x [0, 1]) to normalized
    image plane coordinates centered at the image midpoint, where the
    image height spans [-0.5, 0.5] and the width is scaled by the aspect
    ratio.
    '''
    aspect = imageWidth / float(imageHeight)
    return [aspect * (pt[0] - 0.5), pt[1] - 0.5]
@@ -1794,25 +1794,25 @@ def execute(self, context): singleVp = scn.calibration_type == 'one_vp' useHorizonSegment = scn.use_horizon_segment setBgImg = scn.set_cambg - + ''' get the active camera ''' - cam = scn.camera + cam = scn.camera if not cam: self.report({'ERROR'}, "No active camera.") return{'CANCELLED'} - + ''' check settings ''' if singleVp: upAxisIndex = ['x', 'y', 'z'].index(scn.up_axis) vp1AxisIndex = ['x', 'y', 'z'].index(scn.vp1_axis) - + if upAxisIndex == vp1AxisIndex: self.report({'ERROR'}, "The up axis cannot be parallel to the axis of the line set.") - return{'CANCELLED'} + return{'CANCELLED'} vp2AxisIndex = (set([0, 1, 2]) ^ set([upAxisIndex, vp1AxisIndex])).pop() vpAxisIndices = [vp1AxisIndex, vp2AxisIndex] else: @@ -1820,20 +1820,20 @@ def execute(self, context): vp2AxisIndex = ['x', 'y', 'z'].index(scn.vp2_axis) vpAxisIndices = [vp1AxisIndex, vp2AxisIndex] setBgImg = scn.set_cambg - + if vpAxisIndices[0] == vpAxisIndices[1]: self.report({'ERROR'}, "The two line sets cannot be parallel to the same axis.") return{'CANCELLED'} - + ''' gather lines for each vanishing point - ''' + ''' activeSpace = bpy.context.area.spaces.active - + if not activeSpace.clip: self.report({'ERROR'}, "There is no active movie clip.") return{'CANCELLED'} - + #check that we have the number of layers we need if not activeSpace.clip.grease_pencil: self.report({'ERROR'}, "There is no grease pencil datablock.") @@ -1848,9 +1848,9 @@ def execute(self, context): if len(gpl) < 2 and singleVp and useHorizonSegment: self.report({'ERROR'}, "Single vanishing point calibration with a custom horizon line requires two grease pencil layers") return{'CANCELLED'} - + vpLineSets = self.gatherGreasePencilSegments() - + #check that we have the expected number of line segment strokes if len(vpLineSets[0]) < 2: self.report({'ERROR'}, "The first grease pencil layer must contain at least two line segment strokes.") @@ -1860,26 +1860,26 @@ def execute(self, context): return{'CANCELLED'} if 
singleVp and useHorizonSegment and len(vpLineSets[1]) != 1: self.report({'ERROR'}, "The second grease pencil layer must contain exactly one line segment stroke (the horizon line).") - return{'CANCELLED'} - + return{'CANCELLED'} + ''' get the principal point P in image plane coordinates - TODO: get the value from the camera data panel, + TODO: get the value from the camera data panel, currently always using the image center ''' imageWidth = activeSpace.clip.size[0] imageHeight = activeSpace.clip.size[1] - - #principal point in image plane coordinates. + + #principal point in image plane coordinates. #in the middle of the image by default P = [0, 0] - + if singleVp: ''' calibration using a single vanishing point ''' imgAspect = imageWidth / float(imageHeight) - + #compute the horizon direction horizDir = normalize([1.0, 0.0]) #flat horizon by default if useHorizonSegment: @@ -1887,14 +1887,14 @@ def execute(self, context): yHorizDir = vpLineSets[1][0][1][1] - vpLineSets[1][0][0][1] horizDir = normalize([-xHorizDir, -yHorizDir]) #print("horizDir", horizDir) - + #compute the vanishing point location vp1 = self.computeIntersectionPointForLineSegments(vpLineSets[0]) - + #get the current relative focal length fAbs = activeSpace.clip.tracking.camera.focal_length sensorWidth = activeSpace.clip.tracking.camera.sensor_width - + f = fAbs / sensorWidth * imgAspect #print("fAbs", fAbs, "f rel", f) Fu = self.relImgCoords2ImgPlaneCoords(vp1, imageWidth, imageHeight) @@ -1923,54 +1923,54 @@ def execute(self, context): else: #assume optical center in image midpoint pass - + #compute the two vanishing points - vps = [self.computeIntersectionPointForLineSegments(vpLineSets[i]) for i in range(2)] - + vps = [self.computeIntersectionPointForLineSegments(vpLineSets[i]) for i in range(2)] + #order vanishing points along the image x axis if vps[1][0] < vps[0][0]: vps.reverse() vpLineSets.reverse() - vpAxisIndices.reverse() - + vpAxisIndices.reverse() + ''' compute focal length ''' Fu = 
self.relImgCoords2ImgPlaneCoords(vps[0], imageWidth, imageHeight) Fv = self.relImgCoords2ImgPlaneCoords(vps[1], imageWidth, imageHeight) - + f = self.computeFocalLength(Fu, Fv, P) - + if f == None: self.report({'ERROR'}, "Failed to compute focal length. Invalid vanishing point constellation.") return{'CANCELLED'} - + ''' compute camera orientation ''' print(Fu, Fv, f) #initial orientation based on the vanishing points and focal length M = self.computeCameraRotationMatrix(Fu, Fv, f, P) - - #sanity check: M should be a pure rotation matrix, + + #sanity check: M should be a pure rotation matrix, #so its determinant should be 1 eps = 0.00001 if 1.0 - M.determinant() < -eps or 1.0 - M.determinant() > eps: - self.report({'ERROR'}, "Non unit rotation matrix determinant: " + str(M.determinant())) - #return{'CANCELLED'} - + self.report({'ERROR'}, "Non unit rotation matrix determinant: " + str(M.determinant())) + #return{'CANCELLED'} + #align the camera to the coordinate axes as specified M = self.alignCoordinateAxes(M, vpAxisIndices[0], vpAxisIndices[1]) #apply the transform to the camera cam.matrix_world = M - + ''' move the camera an arbitrary distance away from the ground plane TODO: focus on the origin or something ''' cam.location = (0, 0, 2) - - #compute an absolute focal length in mm based + + #compute an absolute focal length in mm based #on the current camera settings #TODO: make sure this works for all combinations of #image dimensions and camera sensor settings @@ -1980,25 +1980,25 @@ def execute(self, context): fMm = cam.data.sensor_width * f cam.data.lens = fMm self.report({'INFO'}, "Camera focal length set to " + str(fMm)) - + #move principal point of the blender camera r = imageWidth / float(imageHeight) cam.data.shift_x = -1 * P[0] / r cam.data.shift_y = -1 * P[1] / r - + ''' set the camera background image ''' bpy.context.scene.render.resolution_x = imageWidth bpy.context.scene.render.resolution_y = imageHeight - + if setBgImg: 
bpy.ops.clip.set_viewport_background() - - return{'FINISHED'} - - - + + return{'FINISHED'} + + + def initSceneProps(): ''' @@ -2006,17 +2006,17 @@ def initSceneProps(): ''' desc = "The type of calibration method to use." bpy.types.Scene.calibration_type = bpy.props.EnumProperty(items = [('one_vp','one vanishing point','Estimates the camera orientation using a known focal length, a single vanishing point and an optional horizon tilt angle.'),('two_vp','two vanishing points','Estimates the camera focal length and orientation from two vanishing points')],name = "Method", description=desc, default = ('two_vp')) - + desc = "The axis to which the line segments from the first layer are parallel." bpy.types.Scene.vp1_axis = bpy.props.EnumProperty(items = [('x','x axis','xd'),('y','y axis','yd'),('z','z axis','zd')],name = "Parallel to the", description=desc, default = ('x')) desc = "The axis to which the line segments from the second layer are parallel." bpy.types.Scene.vp2_axis = bpy.props.EnumProperty(items = [('x','x axis','xd'),('y','y axis','yd'),('z','z axis','zd')],name = "Parallel to the", description=desc, default = ('y')) desc = "The up axis for single vanishing point calibration." bpy.types.Scene.up_axis = bpy.props.EnumProperty(items = [('x','x axis','xd'),('y','y axis','yd'),('z','z axis','zd')],name = "Up axis", description=desc, default = ('z')) - + desc = "How the optical center is computed for calibration using two vanishing points." 
def register():
    '''Add-on entry point: defines the custom scene properties, then
    registers every class in this module with Blender.
    '''
    initSceneProps()
    bpy.utils.register_module(__name__)


def unregister():
    '''Add-on exit point: unregisters every class in this module.
    NOTE(review): the scene properties created by initSceneProps are not
    removed here -- confirm whether they should be deleted on unregister.
    '''
    bpy.utils.unregister_module(__name__)

if __name__ == "__main__":
    #allows running the script directly, e.g. from Blender's text editor
    register()