diff --git a/bin/meshroom_photogrammetry b/bin/meshroom_photogrammetry index b7d8fecc6e..2447759287 100755 --- a/bin/meshroom_photogrammetry +++ b/bin/meshroom_photogrammetry @@ -114,7 +114,10 @@ with multiview.GraphModification(graph): multiview.photogrammetry(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) elif args.pipeline.lower() == "hdri": # default hdri pipeline - graph = multiview.hdri(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) + multiview.hdri(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) + elif args.pipeline.lower() == "hdrifisheye": + # default hdriFisheye pipeline + multiview.hdriFisheye(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph) else: # custom pipeline graph.load(args.pipeline) diff --git a/meshroom/core/attribute.py b/meshroom/core/attribute.py index 2880bb836f..79b940aeb3 100644 --- a/meshroom/core/attribute.py +++ b/meshroom/core/attribute.py @@ -1,8 +1,10 @@ #!/usr/bin/env python # coding:utf-8 import collections +import copy import re import weakref +import types from meshroom.common import BaseObject, Property, Variant, Signal, ListModel, DictModel, Slot from meshroom.core import desc, pyCompatibility, hashValue @@ -54,7 +56,7 @@ def __init__(self, node, attributeDesc, isOutput, root=None, parent=None): self._node = weakref.ref(node) self.attributeDesc = attributeDesc self._isOutput = isOutput - self._value = attributeDesc.value + self._value = copy.copy(attributeDesc.value) self._label = attributeDesc.label # invalidation value for output attributes @@ -162,8 +164,15 @@ def isLinkExpression(value): """ return isinstance(value, pyCompatibility.basestring) and Attribute.stringIsLinkRe.match(value) - def getLinkParam(self): - return self.node.graph.edge(self).src if self.isLink else None + def getLinkParam(self, recursive=False): + if not self.isLink: + return None + linkParam = 
self.node.graph.edge(self).src + if not recursive: + return linkParam + if linkParam.isLink: + return linkParam.getLinkParam(recursive) + return linkParam def _applyExpr(self): """ @@ -189,7 +198,7 @@ def getExportValue(self): if self.isLink: return self.getLinkParam().asLinkExpr() if self.isOutput: - return self.desc.value + return self.defaultValue() return self._value def getValueStr(self): @@ -201,7 +210,10 @@ def getValueStr(self): return str(self.value) def defaultValue(self): - return self.desc.value + if isinstance(self.desc.value, types.FunctionType): + return self.desc.value(self) + # Need to force a copy, for the case where the value is a list (avoid reference to the desc value) + return copy.copy(self.desc.value) def _isDefault(self): return self._value == self.defaultValue() diff --git a/meshroom/core/desc.py b/meshroom/core/desc.py index 7618fa671f..7196a45953 100755 --- a/meshroom/core/desc.py +++ b/meshroom/core/desc.py @@ -397,6 +397,7 @@ class Node(object): outputs = [] size = StaticNodeSize(1) parallelization = None + documentation = '' def __init__(self): pass diff --git a/meshroom/core/graph.py b/meshroom/core/graph.py index 1b176e8a59..c994dc0cef 100644 --- a/meshroom/core/graph.py +++ b/meshroom/core/graph.py @@ -273,12 +273,15 @@ def load(self, filepath, setupProjectFile=True): # Add node to the graph with raw attributes values self._addNode(n, nodeName) - if setupProjectFile: - # Update filepath related members - self._setFilepath(filepath) + # Create graph edges by resolving attributes expressions + self._applyExpr() - # Create graph edges by resolving attributes expressions - self._applyExpr() + if setupProjectFile: + # Update filepath related members + # Note: needs to be done at the end as it will trigger an updateInternals. 
+ self._setFilepath(filepath) + + return True @property def updateEnabled(self): @@ -558,7 +561,7 @@ def findNode(self, nodeExpr): candidates = self.findNodeCandidates('^' + nodeExpr) if not candidates: raise KeyError('No node candidate for "{}"'.format(nodeExpr)) - elif len(candidates) > 1: + if len(candidates) > 1: raise KeyError('Multiple node candidates for "{}": {}'.format(nodeExpr, str([c.name for c in candidates]))) return candidates[0] @@ -678,11 +681,11 @@ def _dfsVisit(self, u, visitor, colors, nodeChildren, longestPathFirst): # (u,v) is a tree edge self.dfsVisit(v, visitor, colors, nodeChildren, longestPathFirst) # TODO: avoid recursion elif colors[v] == GRAY: + # (u,v) is a back edge visitor.backEdge((u, v), self) - pass # (u,v) is a back edge elif colors[v] == BLACK: + # (u,v) is a cross or forward edge visitor.forwardOrCrossEdge((u, v), self) - pass # (u,v) is a cross or forward edge visitor.finishEdge((u, v), self) colors[u] = BLACK visitor.finishVertex(u, self) @@ -737,8 +740,7 @@ def finishVertex(vertex, graph): def finishEdge(edge, graph): if edge[0].hasStatus(Status.SUCCESS) or edge[1].hasStatus(Status.SUCCESS): return - else: - edges.append(edge) + edges.append(edge) visitor.finishVertex = finishVertex visitor.finishEdge = finishEdge @@ -869,23 +871,23 @@ def flowEdges(self, startNodes=None): flowEdges.append(link) return flowEdges - def nodesFromNode(self, startNode, filterType=None): + def nodesFromNode(self, startNode, filterTypes=None): """ Return the node chain from startNode to the graph leaves. Args: startNode (Node): the node to start the visit from. - filterType (str): (optional) only return the nodes of the given type + filterTypes (str list): (optional) only return the nodes of the given types (does not stop the visit, this is a post-process only) Returns: - The list of nodes from startNode to the graph leaves following edges. + The list of nodes and edges, from startNode to the graph leaves following edges. 
""" nodes = [] edges = [] visitor = Visitor() def discoverVertex(vertex, graph): - if not filterType or vertex.nodeType == filterType: + if not filterTypes or vertex.nodeType in filterTypes: nodes.append(vertex) visitor.discoverVertex = discoverVertex diff --git a/meshroom/core/node.py b/meshroom/core/node.py index 9f0dc3251c..a59e4b1cd2 100644 --- a/meshroom/core/node.py +++ b/meshroom/core/node.py @@ -465,6 +465,9 @@ def getLabel(self): t, idx = self._name.split("_") return "{}{}".format(t, idx if int(idx) > 1 else "") + def getDocumentation(self): + return self.nodeDesc.documentation + @property def packageFullName(self): return '-'.join([self.packageName, self.packageVersion]) @@ -495,6 +498,9 @@ def attribute(self, name): def getAttributes(self): return self._attributes + def hasAttribute(self, name): + return name in self._attributes.keys() + def _applyExpr(self): for attr in self._attributes: attr._applyExpr() @@ -540,6 +546,23 @@ def _computeUids(self): self._uids[uidIndex] = hashValue(uidAttributes) def _buildCmdVars(self): + def _buildAttributeCmdVars(cmdVars, name, attr): + if attr.attributeDesc.group is not None: + # if there is a valid command line "group" + v = attr.getValueStr() + cmdVars[name] = '--{name} {value}'.format(name=name, value=v) + cmdVars[name + 'Value'] = str(v) + + if v: + cmdVars[attr.attributeDesc.group] = cmdVars.get(attr.attributeDesc.group, '') + \ + ' ' + cmdVars[name] + elif isinstance(attr, GroupAttribute): + assert isinstance(attr.value, DictModel) + # if the GroupAttribute is not set in a single command line argument, + # the sub-attributes may need to be exposed individually + for v in attr._value: + _buildAttributeCmdVars(cmdVars, v.name, v) + """ Generate command variables using input attributes and resolved output attributes names and values. 
""" for uidIndex, value in self._uids.items(): self._cmdVars['uid{}'.format(uidIndex)] = value @@ -548,14 +571,7 @@ def _buildCmdVars(self): for name, attr in self._attributes.objects.items(): if attr.isOutput: continue # skip outputs - v = attr.getValueStr() - - self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v) - self._cmdVars[name + 'Value'] = str(v) - - if v: - self._cmdVars[attr.attributeDesc.group] = self._cmdVars.get(attr.attributeDesc.group, '') + \ - ' ' + self._cmdVars[name] + _buildAttributeCmdVars(self._cmdVars, name, attr) # For updating output attributes invalidation values cmdVarsNoCache = self._cmdVars.copy() @@ -570,8 +586,14 @@ def _buildCmdVars(self): if not isinstance(attr.attributeDesc, desc.File): continue - attr.value = attr.attributeDesc.value.format(**self._cmdVars) - attr._invalidationValue = attr.attributeDesc.value.format(**cmdVarsNoCache) + defaultValue = attr.defaultValue() + try: + attr.value = defaultValue.format(**self._cmdVars) + attr._invalidationValue = defaultValue.format(**cmdVarsNoCache) + except KeyError as e: + logging.warning('Invalid expression with missing key on "{nodeName}.{attrName}" with value "{defaultValue}".\nError: {err}'.format(nodeName=self.name, attrName=attr.name, defaultValue=defaultValue, err=str(e))) + except ValueError as e: + logging.warning('Invalid expression value on "{nodeName}.{attrName}" with value "{defaultValue}".\nError: {err}'.format(nodeName=self.name, attrName=attr.name, defaultValue=defaultValue, err=str(e))) v = attr.getValueStr() self._cmdVars[name] = '--{name} {value}'.format(name=name, value=v) @@ -597,8 +619,7 @@ def hasStatus(self, status): return False return True - @Slot(result=bool) - def isComputed(self): + def _isComputed(self): return self.hasStatus(Status.SUCCESS) @Slot() @@ -748,6 +769,7 @@ def __repr__(self): name = Property(str, getName, constant=True) label = Property(str, getLabel, constant=True) nodeType = Property(str, nodeType.fget, constant=True) + 
documentation = Property(str, getDocumentation, constant=True) positionChanged = Signal() position = Property(Variant, position.fget, position.fset, notify=positionChanged) x = Property(float, lambda self: self._position.x, notify=positionChanged) @@ -764,6 +786,7 @@ def __repr__(self): size = Property(int, getSize, notify=sizeChanged) globalStatusChanged = Signal() globalStatus = Property(str, lambda self: self.getGlobalStatus().name, notify=globalStatusChanged) + isComputed = Property(bool, _isComputed, notify=globalStatusChanged) class Node(BaseNode): diff --git a/meshroom/core/stats.py b/meshroom/core/stats.py index ec07dbef31..e9a325381f 100644 --- a/meshroom/core/stats.py +++ b/meshroom/core/stats.py @@ -303,7 +303,7 @@ def run(self): if self.proc.is_running(): self.updateStats() return - except (KeyboardInterrupt, SystemError, GeneratorExit): + except (KeyboardInterrupt, SystemError, GeneratorExit, psutil.NoSuchProcess): pass def stopRequest(self): diff --git a/meshroom/multiview.py b/meshroom/multiview.py index 6088172fbf..6332061de8 100644 --- a/meshroom/multiview.py +++ b/meshroom/multiview.py @@ -6,7 +6,10 @@ from meshroom.core.graph import Graph, GraphModification # Supported image extensions -imageExtensions = ('.jpg', '.jpeg', '.tif', '.tiff', '.png', '.exr', '.rw2', '.cr2', '.nef', '.arw') +imageExtensions = ('.jpg', '.jpeg', '.tif', '.tiff', '.png', '.exr', + '.rw2', '.cr2', '.nef', '.arw', + '.dpx', + ) videoExtensions = ('.avi', '.mov', '.qt', '.mkv', '.webm', '.mp4', '.mpg', '.mpeg', '.m2v', '.m4v', @@ -90,7 +93,7 @@ def findFilesByTypeInFolder(folder, recursive=False): return output -def hdri(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None): +def hdri(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None): """ Create a new Graph with a complete HDRI pipeline. 
@@ -107,16 +110,27 @@ def hdri(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), out with GraphModification(graph): nodes = hdriPipeline(graph) cameraInit = nodes[0] - cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) - cameraInit.viewpoints.extend(inputViewpoints) - cameraInit.intrinsics.extend(inputIntrinsics) + if inputImages: + cameraInit.viewpoints.extend([{'path': image} for image in inputImages]) + if inputViewpoints: + cameraInit.viewpoints.extend(inputViewpoints) + if inputIntrinsics: + cameraInit.intrinsics.extend(inputIntrinsics) if output: - stitching = nodes[-1] - graph.addNewNode('Publish', output=output, inputFiles=[stitching.output]) + imageProcessing = nodes[-1] + graph.addNewNode('Publish', output=output, inputFiles=[imageProcessing.outputImages]) return graph +def hdriFisheye(inputImages=None, inputViewpoints=None, inputIntrinsics=None, output='', graph=None): + if not graph: + graph = Graph('HDRI-Fisheye') + with GraphModification(graph): + hdri(inputImages, inputViewpoints, inputIntrinsics, output, graph) + for panoramaInit in graph.nodesByType("PanoramaInit"): + panoramaInit.attribute("useFisheye").value = True + return graph def hdriPipeline(graph): """ @@ -128,46 +142,77 @@ def hdriPipeline(graph): list of Node: the created nodes """ cameraInit = graph.addNewNode('CameraInit') + try: + # fisheye4 does not work well in the ParoramaEstimation, so here we avoid to use it. 
+ cameraInit.attribute('allowedCameraModels').value.remove("fisheye4") + except ValueError: + pass - ldr2hdr = graph.addNewNode('LDRToHDR', + panoramaPrepareImages = graph.addNewNode('PanoramaPrepareImages', input=cameraInit.output) + ldr2hdrSampling = graph.addNewNode('LdrToHdrSampling', + input=panoramaPrepareImages.output) + + ldr2hdrCalibration = graph.addNewNode('LdrToHdrCalibration', + input=ldr2hdrSampling.input, + samples=ldr2hdrSampling.output) + + ldr2hdrMerge = graph.addNewNode('LdrToHdrMerge', + input=ldr2hdrCalibration.input, + response=ldr2hdrCalibration.response) + featureExtraction = graph.addNewNode('FeatureExtraction', - input=ldr2hdr.outSfMDataFilename) - featureExtraction.describerPreset.value = 'ultra' - imageMatching = graph.addNewNode('ImageMatching', + input=ldr2hdrMerge.outSfMData, + describerPreset='high') + + panoramaInit = graph.addNewNode('PanoramaInit', input=featureExtraction.input, - featuresFolders=[featureExtraction.output]) + dependency=[featureExtraction.output] # Workaround for tractor submission with a fake dependency + ) + + imageMatching = graph.addNewNode('ImageMatching', + input=panoramaInit.outSfMData, + featuresFolders=[featureExtraction.output], + method='FrustumOrVocabularyTree') + featureMatching = graph.addNewNode('FeatureMatching', input=imageMatching.input, featuresFolders=imageMatching.featuresFolders, imagePairsList=imageMatching.output) - panoramaExternalInfo = graph.addNewNode('PanoramaExternalInfo', - input=ldr2hdr.outSfMDataFilename, - matchesFolders=[featureMatching.output] # Workaround for tractor submission with a fake dependency - ) - panoramaEstimation = graph.addNewNode('PanoramaEstimation', - input=panoramaExternalInfo.outSfMDataFilename, + input=featureMatching.input, featuresFolders=featureMatching.featuresFolders, matchesFolders=[featureMatching.output]) + panoramaOrientation = graph.addNewNode('SfMTransform', + input=panoramaEstimation.output, + method='from_single_camera') + panoramaWarping = 
graph.addNewNode('PanoramaWarping', - input=panoramaEstimation.outSfMDataFilename) + input=panoramaOrientation.output) panoramaCompositing = graph.addNewNode('PanoramaCompositing', - input=panoramaWarping.output) + input=panoramaWarping.input, + warpingFolder=panoramaWarping.output) + + imageProcessing = graph.addNewNode('ImageProcessing', + input=panoramaCompositing.output, + fillHoles=True, + extension='exr') return [ cameraInit, featureExtraction, + panoramaInit, imageMatching, featureMatching, - panoramaExternalInfo, panoramaEstimation, + panoramaOrientation, panoramaWarping, panoramaCompositing, + imageProcessing, ] diff --git a/meshroom/nodes/aliceVision/CameraDownscale.py b/meshroom/nodes/aliceVision/CameraDownscale.py deleted file mode 100644 index 894c3cc3c6..0000000000 --- a/meshroom/nodes/aliceVision/CameraDownscale.py +++ /dev/null @@ -1,49 +0,0 @@ -__version__ = "1.0" - -import json -import os - -from meshroom.core import desc - - -class CameraDownscale(desc.CommandLineNode): - commandLine = 'aliceVision_cameraDownscale {allParams}' - size = desc.DynamicNodeSize('input') - - inputs = [ - desc.File( - name='input', - label='Input', - description="SfM Data File", - value='', - uid=[0], - ), - desc.FloatParam( - name='rescalefactor', - label='RescaleFactor', - description='Newsize = rescalefactor * oldsize', - value=0.5, - range=(0.0, 1.0, 0.1), - uid=[0], - advanced=True, - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='Verbosity level (fatal, error, warning, info, debug, trace).', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - uid=[], - ), - ] - - outputs = [ - desc.File( - name='outSfMDataFilename', - label='Output SfMData File', - description='Path to the output sfmdata file', - value=desc.Node.internalFolder + 'sfmData.abc', - uid=[], - ) - ] diff --git a/meshroom/nodes/aliceVision/CameraInit.py b/meshroom/nodes/aliceVision/CameraInit.py index 
876bc850cc..1d632a3ddf 100644 --- a/meshroom/nodes/aliceVision/CameraInit.py +++ b/meshroom/nodes/aliceVision/CameraInit.py @@ -5,6 +5,7 @@ import psutil import shutil import tempfile +import logging from meshroom.core import desc @@ -16,18 +17,39 @@ desc.IntParam(name="intrinsicId", label="Intrinsic", description="Internal Camera Parameters", value=-1, uid=[0], range=None), desc.IntParam(name="rigId", label="Rig", description="Rig Parameters", value=-1, uid=[0], range=None), desc.IntParam(name="subPoseId", label="Rig Sub-Pose", description="Rig Sub-Pose Parameters", value=-1, uid=[0], range=None), - desc.StringParam(name="metadata", label="Image Metadata", description="", value="", uid=[], advanced=True), + desc.StringParam(name="metadata", label="Image Metadata", + description="The configuration of the Viewpoints is based on the images metadata.\n" + "The important ones are:\n" + " * Focal Length: the focal length in mm.\n" + " * Make and Model: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database.\n" + " * Serial Number: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately.", + value="", uid=[], advanced=True), ] Intrinsic = [ desc.IntParam(name="intrinsicId", label="Id", description="Intrinsic UID", value=-1, uid=[0], range=None), - desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length", description="Initial Guess on the Focal Length", value=-1.0, uid=[0], range=None), - desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length", value=-1.0, uid=[0], range=None), - desc.ChoiceParam(name="type", label="Camera Type", description="Camera Type", value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4'], exclusive=True, uid=[0]), + desc.FloatParam(name="pxInitialFocalLength", label="Initial Focal Length", + 
description="Initial Guess on the Focal Length (in pixels). \n" + "When we have an initial value from EXIF, this value is not accurate but cannot be wrong. \n" + "So this value is used to limit the range of possible values in the optimization. \n" + "If you put -1, this value will not be used and the focal length will not be bounded.", + value=-1.0, uid=[0], range=None), + desc.FloatParam(name="pxFocalLength", label="Focal Length", description="Known/Calibrated Focal Length (in pixels)", value=-1.0, uid=[0], range=None), + desc.ChoiceParam(name="type", label="Camera Type", + description="Mathematical Model used to represent a camera:\n" + " * pinhole: Simplest projective camera model without optical distortion (focal and optical center).\n" + " * radial1: Pinhole camera with one radial distortion parameter\n" + " * radial3: Pinhole camera with 3 radial distortion parameters\n" + " * brown: Pinhole camera with 3 radial and 2 tangential distortion parameters\n" + " * fisheye4: Pinhole camera with 4 distortion parameters suited for fisheye optics (like 120deg FoV)\n" + " * equidistant_r3: Non-projective camera model suited for full-fisheye optics (like 180deg FoV)\n", + value="", values=['', 'pinhole', 'radial1', 'radial3', 'brown', 'fisheye4', 'equidistant_r3'], exclusive=True, uid=[0]), desc.IntParam(name="width", label="Width", description="Image Width", value=0, uid=[], range=(0, 10000, 1)), desc.IntParam(name="height", label="Height", description="Image Height", value=0, uid=[], range=(0, 10000, 1)), - desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (camera and lens combined)", value="", uid=[]), - desc.GroupAttribute(name="principalPoint", label="Principal Point", description="", groupDesc=[ + desc.FloatParam(name="sensorWidth", label="Sensor Width", description="Sensor Width (mm)", value=36, uid=[], range=(0, 1000, 1)), + desc.FloatParam(name="sensorHeight", label="Sensor Height", description="Sensor Height (mm)", 
value=24, uid=[], range=(0, 1000, 1)), + desc.StringParam(name="serialNumber", label="Serial Number", description="Device Serial Number (Camera UID and Lens UID combined)", value="", uid=[]), + desc.GroupAttribute(name="principalPoint", label="Principal Point", description="Position of the Optical Center in the Image (i.e. the sensor surface).", groupDesc=[ desc.FloatParam(name="x", label="x", description="", value=0, uid=[], range=(0, 10000, 1)), desc.FloatParam(name="y", label="y", description="", value=0, uid=[], range=(0, 10000, 1)), ]), @@ -94,6 +116,21 @@ class CameraInit(desc.CommandLineNode): size = desc.DynamicNodeSize('viewpoints') + documentation = ''' +This node describes your dataset. It lists the Viewpoints candidates, the guess about the type of optic, the initial focal length +and which images are sharing the same internal camera parameters, as well as potential cameras rigs. + +When you import new images into Meshroom, this node is automatically configured from the analysis of the image metadata. +The software can support images without any metadata but it is recommended to have them for robustness. + +### Metadata +Metadata allows images to be grouped together and provides an initialization of the focal length (in pixel unit). +The metadata needed are: + * **Focal Length**: the focal length in mm. + * **Make** & **Model**: this information allows to convert the focal in mm into a focal length in pixel using an embedded sensor database. + * **Serial Number**: allows to uniquely identify a device so multiple devices with the same Make, Model can be differentiated and their internal parameters are optimized separately (in the photogrammetry case). 
+''' + inputs = [ desc.ListAttribute( name="viewpoints", @@ -221,7 +258,7 @@ def buildIntrinsics(self, node, additionalViews=()): os.makedirs(os.path.join(tmpCache, node.internalFolder)) self.createViewpointsFile(node, additionalViews) cmd = self.buildCommandLine(node.chunks[0]) - # logging.debug(' - commandLine:', cmd) + logging.debug(' - commandLine: {}'.format(cmd)) proc = psutil.Popen(cmd, stdout=None, stderr=None, shell=True) stdout, stderr = proc.communicate() # proc.wait() @@ -234,10 +271,13 @@ def buildIntrinsics(self, node, additionalViews=()): cameraInitSfM = node.output.value return readSfMData(cameraInitSfM) - except Exception: + except Exception as e: + logging.debug("[CameraInit] Error while building intrinsics: {}".format(str(e))) raise finally: - shutil.rmtree(tmpCache) + if os.path.exists(tmpCache): + logging.debug("[CameraInit] Remove temp files in: {}".format(tmpCache)) + shutil.rmtree(tmpCache) def createViewpointsFile(self, node, additionalViews=()): node.viewpointsFile = "" diff --git a/meshroom/nodes/aliceVision/ConvertSfMFormat.py b/meshroom/nodes/aliceVision/ConvertSfMFormat.py index 5260a59b79..2ffc80225b 100644 --- a/meshroom/nodes/aliceVision/ConvertSfMFormat.py +++ b/meshroom/nodes/aliceVision/ConvertSfMFormat.py @@ -6,7 +6,12 @@ class ConvertSfMFormat(desc.CommandLineNode): commandLine = 'aliceVision_convertSfMFormat {allParams}' size = desc.DynamicNodeSize('input') - + + documentation = ''' +Convert an SfM scene from one file format to another. +It can also be used to remove specific parts of from an SfM scene (like filter all 3D landmarks or filter 2D observations). 
+''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/DepthMap.py b/meshroom/nodes/aliceVision/DepthMap.py index 24fa430009..3c4596226a 100644 --- a/meshroom/nodes/aliceVision/DepthMap.py +++ b/meshroom/nodes/aliceVision/DepthMap.py @@ -10,6 +10,16 @@ class DepthMap(desc.CommandLineNode): parallelization = desc.Parallelization(blockSize=3) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +For each camera that have been estimated by the Structure-From-Motion, it estimates the depth value per pixel. + +Adjust the downscale factor to compute depth maps at a higher/lower resolution. +Use a downscale factor of one (full-resolution) only if the quality of the input images is really high (camera on a tripod with high-quality optics). + +## Online +[https://alicevision.org/#photogrammetry/depth_maps_estimation](https://alicevision.org/#photogrammetry/depth_maps_estimation) +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/DepthMapFilter.py b/meshroom/nodes/aliceVision/DepthMapFilter.py index a4f2ed5d14..7dd0eb0448 100644 --- a/meshroom/nodes/aliceVision/DepthMapFilter.py +++ b/meshroom/nodes/aliceVision/DepthMapFilter.py @@ -10,6 +10,11 @@ class DepthMapFilter(desc.CommandLineNode): parallelization = desc.Parallelization(blockSize=10) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +Filter depth map values that are not coherent in multiple depth maps. +This allows to filter unstable points before starting the fusion of all depth maps in the Meshing node. 
+''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/ExportAnimatedCamera.py b/meshroom/nodes/aliceVision/ExportAnimatedCamera.py index b8d97557aa..eefeb48900 100644 --- a/meshroom/nodes/aliceVision/ExportAnimatedCamera.py +++ b/meshroom/nodes/aliceVision/ExportAnimatedCamera.py @@ -6,6 +6,11 @@ class ExportAnimatedCamera(desc.CommandLineNode): commandLine = 'aliceVision_exportAnimatedCamera {allParams}' + documentation = ''' +Convert cameras from an SfM scene into an animated cameras in Alembic file format. +Based on the input image filenames, it will recognize the input video sequence to create an animated camera. +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/ExportMaya.py b/meshroom/nodes/aliceVision/ExportMaya.py index 41568f5df1..9328852685 100644 --- a/meshroom/nodes/aliceVision/ExportMaya.py +++ b/meshroom/nodes/aliceVision/ExportMaya.py @@ -6,6 +6,13 @@ class ExportMaya(desc.CommandLineNode): commandLine = 'aliceVision_exportMeshroomMaya {allParams}' + documentation = ''' +Export a scene for Autodesk Maya, with an Alembic file describing the SfM: cameras and 3D points. +It will export half-size undistorted images to use as image planes for cameras and also export thumbnails. +Use the MeshroomMaya plugin, to load the ABC file. It will recognize the file structure and will setup the scene. +MeshroomMaya contains a user interface to browse all cameras. 
+''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/FeatureExtraction.py b/meshroom/nodes/aliceVision/FeatureExtraction.py index e5c1cfd4cf..a18537787a 100644 --- a/meshroom/nodes/aliceVision/FeatureExtraction.py +++ b/meshroom/nodes/aliceVision/FeatureExtraction.py @@ -9,6 +9,26 @@ class FeatureExtraction(desc.CommandLineNode): parallelization = desc.Parallelization(blockSize=40) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +This node extracts distinctive groups of pixels that are, to some extent, invariant to changing camera viewpoints during image acquisition. +Hence, a feature in the scene should have similar feature descriptions in all images. + +This node implements multiple methods: + * **SIFT** +The most standard method. This is the default and recommended value for all use cases. + * **AKAZE** +AKAZE can be interesting solution to extract features in challenging condition. It could be able to match wider angle than SIFT but has drawbacks. +It may extract to many features, the repartition is not always good. +It is known to be good on challenging surfaces such as skin. + * **CCTAG** +CCTag is a marker type with 3 or 4 crowns. You can put markers in the scene during the shooting session to automatically re-orient and re-scale the scene to a known size. +It is robust to motion-blur, depth-of-field, occlusion. Be careful to have enough white margin around your CCTags. 
+ + +## Online +[https://alicevision.org/#photogrammetry/natural_feature_extraction](https://alicevision.org/#photogrammetry/natural_feature_extraction) +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/FeatureMatching.py b/meshroom/nodes/aliceVision/FeatureMatching.py index cf0f1420ba..9ffcca0045 100644 --- a/meshroom/nodes/aliceVision/FeatureMatching.py +++ b/meshroom/nodes/aliceVision/FeatureMatching.py @@ -9,6 +9,28 @@ class FeatureMatching(desc.CommandLineNode): parallelization = desc.Parallelization(blockSize=20) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +This node performs the matching of all features between the candidate image pairs. + +It is performed in 2 steps: + + 1/ **Photometric Matches** + +It performs the photometric matches between the set of features descriptors from the 2 input images. +For each feature descriptor on the first image, it looks for the 2 closest descriptors in the second image and uses a relative threshold between them. +This assumption kill features on repetitive structure but has proved to be a robust criterion. + + 2/ **Geometric Filtering** + +It performs a geometric filtering of the photometric match candidates. +It uses the features positions in the images to make a geometric filtering by using epipolar geometry in an outlier detection framework +called RANSAC (RANdom SAmple Consensus). It randomly selects a small set of feature correspondences and compute the fundamental (or essential) matrix, +then it checks the number of features that validates this model and iterate through the RANSAC framework. 
+ +## Online +[https://alicevision.org/#photogrammetry/feature_matching](https://alicevision.org/#photogrammetry/feature_matching) +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/GlobalSfM.py b/meshroom/nodes/aliceVision/GlobalSfM.py index a60b7a7895..fb06535161 100644 --- a/meshroom/nodes/aliceVision/GlobalSfM.py +++ b/meshroom/nodes/aliceVision/GlobalSfM.py @@ -10,6 +10,11 @@ class GlobalSfM(desc.CommandLineNode): commandLine = 'aliceVision_globalSfM {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +Performs the Structure-From-Motion with a global approach. +It is known to be faster but less robust to challenging datasets than the Incremental approach. +''' + inputs = [ desc.File( name='input', @@ -99,16 +104,23 @@ class GlobalSfM(desc.CommandLineNode): outputs = [ desc.File( name='output', - label='Output Folder', - description='', - value=desc.Node.internalFolder, + label='Output SfMData File', + description='Path to the output sfmdata file', + value=desc.Node.internalFolder + 'sfm.abc', uid=[], ), desc.File( - name='outSfMDataFilename', - label='Output SfMData File', - description='Path to the output sfmdata file', - value=desc.Node.internalFolder + 'SfmData.abc', + name='outputViewsAndPoses', + label='Output Poses', + description='''Path to the output sfmdata file with cameras (views and poses).''', + value=desc.Node.internalFolder + 'cameras.sfm', + uid=[], + ), + desc.File( + name='extraInfoFolder', + label='Output Folder', + description='Folder for intermediate reconstruction files and additional reconstruction information files.', + value=desc.Node.internalFolder, uid=[], ), ] diff --git a/meshroom/nodes/aliceVision/HDRIstitching.py b/meshroom/nodes/aliceVision/HDRIstitching.py deleted file mode 100644 index af81410eec..0000000000 --- a/meshroom/nodes/aliceVision/HDRIstitching.py +++ /dev/null @@ -1,89 +0,0 @@ -__version__ = "1.0" - -from meshroom.core import desc - - -class 
HDRIstitching(desc.CommandLineNode): - commandLine = 'aliceVision_utils_fisheyeProjection {allParams}' - - inputs = [ - desc.ListAttribute( - elementDesc=desc.File( - name='inputFile', - label='Input File/Folder', - description="", - value='', - uid=[0], - ), - name='input', - label='Input Folder', - description="List of fisheye images or folder containing them." - ), - desc.FloatParam( - name='blurWidth', - label='Blur Width', - description="Blur width of alpha channel for all fisheye (between 0 and 1). \n" - "Determine the transitions sharpness.", - value=0.2, - range=(0, 1, 0.1), - uid=[0], - ), - desc.ListAttribute( - elementDesc=desc.FloatParam( - name='imageXRotation', - label='Image X Rotation', - description="", - value=0, - range=(-20, 20, 1), - uid=[0], - ), - name='xRotation', - label='X Rotations', - description="Rotations in degree on axis X (horizontal axis) for each image.", - ), - desc.ListAttribute( - elementDesc=desc.FloatParam( - name='imageYRotation', - label='Image Y Rotation', - description="", - value=0, - range=(-30, 30, 5), - uid=[0], - ), - name='yRotation', - label='Y Rotations', - description="Rotations in degree on axis Y (vertical axis) for each image.", - ), - desc.ListAttribute( - elementDesc=desc.FloatParam( - name='imageZRotation', - label='Image Z Rotation', - description="", - value=0, - range=(-10, 10, 1), - uid=[0], - ), - name='zRotation', - label='Z Rotations', - description="Rotations in degree on axis Z (depth axis) for each image.", - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description="Verbosity level (fatal, error, warning, info, debug, trace).", - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - uid=[], - ), - ] - - outputs = [ - desc.File( - name='output', - label='Output Panorama', - description="Output folder for panorama", - value=desc.Node.internalFolder, - uid=[], - ), - ] \ No newline at end of file diff --git 
a/meshroom/nodes/aliceVision/ImageMatching.py b/meshroom/nodes/aliceVision/ImageMatching.py index 3e1ffb409c..c117548b1f 100644 --- a/meshroom/nodes/aliceVision/ImageMatching.py +++ b/meshroom/nodes/aliceVision/ImageMatching.py @@ -8,6 +8,30 @@ class ImageMatching(desc.CommandLineNode): commandLine = 'aliceVision_imageMatching {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +The goal of this node is to select the image pairs to match. The ambition is to find the images that are looking to the same areas of the scene. +Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs. + +It provides multiple methods: + * **VocabularyTree** +It uses image retrieval techniques to find images that share some content without the cost of resolving all feature matches in details. +Each image is represented in a compact image descriptor which allows to compute the distance between all images descriptors very efficiently. +If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected. + * **Sequential** +If your input is a video sequence, you can use this option to link images between them over time. + * **SequentialAndVocabularyTree** +Combines sequential approach with Voc Tree to enable connections between keyframes at different times. + * **Exhaustive** +Export all image pairs. + * **Frustum** +If images have known poses, computes the intersection between cameras frustums to create the list of image pairs. + * **FrustumOrVocabularyTree** +If images have known poses, use frustum intersection else use VocabularuTree. 
+ +## Online +[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching) +''' + inputs = [ desc.File( name='input', @@ -31,9 +55,17 @@ class ImageMatching(desc.CommandLineNode): desc.ChoiceParam( name='method', label='Method', - description='Method used to select the image pairs to match.', + description='Method used to select the image pairs to match:\n' + ' * VocabularyTree: It uses image retrieval techniques to find images that share some content without the cost of resolving all \n' + 'feature matches in details. Each image is represented in a compact image descriptor which allows to compute the distance between all \n' + 'images descriptors very efficiently. If your scene contains less than "Voc Tree: Minimal Number of Images", all image pairs will be selected.\n' + ' * Sequential: If your input is a video sequence, you can use this option to link images between them over time.\n' + ' * SequentialAndVocabularyTree: Combines sequential approach with VocTree to enable connections between keyframes at different times.\n' + ' * Exhaustive: Export all image pairs.\n' + ' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n' + ' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n', value='VocabularyTree', - values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree','Exhaustive','Frustum'], + values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'], exclusive=True, uid=[0], ), diff --git a/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py b/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py index b5601c840f..2d506227a5 100644 --- a/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py +++ b/meshroom/nodes/aliceVision/ImageMatchingMultiSfM.py @@ -9,6 +9,14 @@ class ImageMatchingMultiSfM(desc.CommandLineNode): # 
use both SfM inputs to define Node's size size = desc.MultiDynamicNodeSize(['input', 'inputB']) + documentation = ''' +The goal of this node is to select the image pairs to match in the context of an SfM augmentation. +The ambition is to find the images that are looking to the same areas of the scene. +Thanks to this node, the FeatureMatching node will only compute the matches between the selected image pairs. + +## Online +[https://alicevision.org/#photogrammetry/image_matching](https://alicevision.org/#photogrammetry/image_matching) +''' inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/ImageProcessing.py b/meshroom/nodes/aliceVision/ImageProcessing.py index f964c9f085..28c706b9fd 100644 --- a/meshroom/nodes/aliceVision/ImageProcessing.py +++ b/meshroom/nodes/aliceVision/ImageProcessing.py @@ -1,7 +1,9 @@ -__version__ = "1.1" +__version__ = "2.0" from meshroom.core import desc +import os.path + class ImageProcessing(desc.CommandLineNode): commandLine = 'aliceVision_utils_imageProcessing {allParams}' @@ -9,6 +11,10 @@ class ImageProcessing(desc.CommandLineNode): # parallelization = desc.Parallelization(blockSize=40) # commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +Convert or apply filtering to the input images. 
+''' + inputs = [ desc.File( name='input', @@ -177,14 +183,22 @@ class ImageProcessing(desc.CommandLineNode): name='outSfMData', label='Output sfmData', description='Output sfmData.', - value=desc.Node.internalFolder + 'sfmData.abc', + value=lambda attr: (desc.Node.internalFolder + os.path.basename(attr.node.input.value)) if (os.path.splitext(attr.node.input.value)[1] in ['.abc', '.sfm']) else '', uid=[], + group='', # do not export on the command line ), desc.File( - name='outputFolder', - label='Output Images Folder', + name='output', + label='Output Folder', description='Output Images Folder.', value=desc.Node.internalFolder, + uid=[], + ), + desc.File( + name='outputImages', + label='Output Images', + description='Output Image Files.', + value=lambda attr: desc.Node.internalFolder + os.path.basename(attr.node.input.value) if (os.path.splitext(attr.node.input.value)[1] not in ['', '.abc', '.sfm']) else (desc.Node.internalFolder + '*.' + (attr.node.extension.value or '*')), group='', # do not export on the command line uid=[], ), diff --git a/meshroom/nodes/aliceVision/KeyframeSelection.py b/meshroom/nodes/aliceVision/KeyframeSelection.py index 8d85dff80a..987af42367 100644 --- a/meshroom/nodes/aliceVision/KeyframeSelection.py +++ b/meshroom/nodes/aliceVision/KeyframeSelection.py @@ -7,6 +7,13 @@ class KeyframeSelection(desc.CommandLineNode): commandLine = 'aliceVision_utils_keyframeSelection {allParams}' + documentation = ''' +Allows to extract keyframes from a video and insert metadata. +It can extract frames from a synchronized multi-cameras rig. + +You can extract frames at regular interval by configuring only the min/maxFrameStep. 
+''' + inputs = [ desc.ListAttribute( elementDesc=desc.File( diff --git a/meshroom/nodes/aliceVision/LdrToHdrCalibration.py b/meshroom/nodes/aliceVision/LdrToHdrCalibration.py new file mode 100644 index 0000000000..62e31283cc --- /dev/null +++ b/meshroom/nodes/aliceVision/LdrToHdrCalibration.py @@ -0,0 +1,197 @@ +__version__ = "2.0" + +import json + +from meshroom.core import desc + +def findMetadata(d, keys, defaultValue): + v = None + for key in keys: + v = d.get(key, None) + k = key.lower() + if v is not None: + return v + for dk, dv in d.iteritems(): + dkm = dk.lower().replace(" ", "") + if dkm == key.lower(): + return dv + dkm = dkm.split(":")[-1] + dkm = dkm.split("/")[-1] + if dkm == k: + return dv + return defaultValue + + + +class LdrToHdrCalibration(desc.CommandLineNode): + commandLine = 'aliceVision_LdrToHdrCalibration {allParams}' + size = desc.DynamicNodeSize('input') + + documentation = ''' + Calibrate LDR to HDR response curve from samples +''' + + inputs = [ + desc.File( + name='input', + label='Input', + description='SfMData file.', + value='', + uid=[0], + ), + desc.File( + name='samples', + label='Samples folder', + description='Samples folder', + value=desc.Node.internalFolder, + uid=[0], + ), + desc.ChoiceParam( + name='calibrationMethod', + label='Calibration Method', + description="Method used for camera calibration \n" + " * Linear: Disable the calibration and assumes a linear Camera Response Function. If images are encoded in a known colorspace (like sRGB for JPEG), the images will be automatically converted to linear. \n" + " * Debevec: This is the standard method for HDR calibration. \n" + " * Grossberg: Based on learned database of cameras, it allows to reduce the CRF to few parameters while keeping all the precision. \n" + " * Laguerre: Simple but robust method estimating the minimal number of parameters. \n" + " * Robertson: First method for HDR calibration in the literature. 
\n", + values=['linear', 'debevec', 'grossberg', 'laguerre'], + value='debevec', + exclusive=True, + uid=[0], + ), + desc.ChoiceParam( + name='calibrationWeight', + label='Calibration Weight', + description="Weight function used to calibrate camera response \n" + " * default (automatically selected according to the calibrationMethod) \n" + " * gaussian \n" + " * triangle \n" + " * plateau", + value='default', + values=['default', 'gaussian', 'triangle', 'plateau'], + exclusive=True, + uid=[0], + ), + desc.IntParam( + name='userNbBrackets', + label='Number of Brackets', + description='Number of exposure brackets per HDR image (0 for automatic detection).', + value=0, + range=(0, 15, 1), + uid=[0], + group='user', # not used directly on the command line + ), + desc.IntParam( + name='nbBrackets', + label='Automatic Nb Brackets', + description='Number of exposure brackets used per HDR image. It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".', + value=0, + range=(0, 10, 1), + uid=[], + ), + desc.IntParam( + name='channelQuantizationPower', + label='Channel Quantization Power', + description='Quantization level like 8 bits or 10 bits.', + value=10, + range=(8, 14, 1), + uid=[0], + advanced=True, + ), + desc.IntParam( + name='maxTotalPoints', + label='Max Number of Points', + description='Max number of points selected by the sampling strategy.\n' + 'This ensures that this sampling step will extract a number of pixels values\n' + 'that the calibration step can manage (in term of computation time and memory usage).', + value=1000000, + range=(8, 10000000, 1000), + uid=[0], + advanced=True, + ), + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='verbosity level (fatal, error, warning, info, debug, trace).', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[], + ) + ] + + outputs = [ + desc.File( + name='response', 
+ label='Output response File', + description='Path to the output response file', + value=desc.Node.internalFolder + 'response.csv', + uid=[], + ) + ] + + @classmethod + def update(cls, node): + if not isinstance(node.nodeDesc, cls): + raise ValueError("Node {} is not an instance of type {}".format(node, cls)) + # TODO: use Node version for this test + if 'userNbBrackets' not in node.getAttributes().keys(): + # Old version of the node + return + if node.userNbBrackets.value != 0: + node.nbBrackets.value = node.userNbBrackets.value + return + # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion)) + cameraInitOutput = node.input.getLinkParam(recursive=True) + if not cameraInitOutput: + node.nbBrackets.value = 0 + return + if not cameraInitOutput.node.hasAttribute('viewpoints'): + if cameraInitOutput.node.hasAttribute('input'): + cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) + viewpoints = cameraInitOutput.node.viewpoints.value + + # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints))) + inputs = [] + for viewpoint in viewpoints: + jsonMetadata = viewpoint.metadata.value + if not jsonMetadata: + # no metadata, we cannot found the number of brackets + node.nbBrackets.value = 0 + return + d = json.loads(jsonMetadata) + fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "") + shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "") + iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "") + if not fnumber and not shutterSpeed: + # If one image without shutter or fnumber, we cannot found the number of brackets. + # We assume that there is no multi-bracketing, so nothing to do. 
+ node.nbBrackets.value = 1 + return + inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso))) + inputs.sort() + + exposureGroups = [] + exposures = [] + for path, exp in inputs: + if exposures and exp != exposures[-1] and exp == exposures[0]: + exposureGroups.append(exposures) + exposures = [exp] + else: + exposures.append(exp) + exposureGroups.append(exposures) + exposures = None + bracketSizes = set() + if len(exposureGroups) == 1: + node.nbBrackets.value = 1 + else: + for expGroup in exposureGroups: + bracketSizes.add(len(expGroup)) + if len(bracketSizes) == 1: + node.nbBrackets.value = bracketSizes.pop() + # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) + else: + node.nbBrackets.value = 0 + # logging.info("[LDRToHDR] Update end") + diff --git a/meshroom/nodes/aliceVision/LDRToHDR.py b/meshroom/nodes/aliceVision/LdrToHdrMerge.py similarity index 59% rename from meshroom/nodes/aliceVision/LDRToHDR.py rename to meshroom/nodes/aliceVision/LdrToHdrMerge.py index 56e2f247fa..b58537bdf0 100644 --- a/meshroom/nodes/aliceVision/LDRToHDR.py +++ b/meshroom/nodes/aliceVision/LdrToHdrMerge.py @@ -1,46 +1,76 @@ __version__ = "2.0" import json -import os from meshroom.core import desc +def findMetadata(d, keys, defaultValue): + v = None + for key in keys: + v = d.get(key, None) + k = key.lower() + if v is not None: + return v + for dk, dv in d.iteritems(): + dkm = dk.lower().replace(" ", "") + if dkm == key.lower(): + return dv + dkm = dkm.split(":")[-1] + dkm = dkm.split("/")[-1] + if dkm == k: + return dv + return defaultValue -class DividedInputNodeSize(desc.DynamicNodeSize): - """ - The LDR2HDR will reduce the amount of views in the SfMData. - This class converts the number of LDR input views into the number of HDR output views. 
- """ - def __init__(self, param, divParam): - super(DividedInputNodeSize, self).__init__(param) - self._divParam = divParam - def computeSize(self, node): - s = super(DividedInputNodeSize, self).computeSize(node) - divParam = node.attribute(self._divParam) - if divParam.value == 0: - return s - return s / divParam.value +class LdrToHdrMerge(desc.CommandLineNode): + commandLine = 'aliceVision_LdrToHdrMerge {allParams}' + size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=2) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' -class LDRToHDR(desc.CommandLineNode): - commandLine = 'aliceVision_convertLDRToHDR {allParams}' - size = DividedInputNodeSize('input', 'nbBrackets') - - cpu = desc.Level.INTENSIVE - ram = desc.Level.NORMAL + documentation = ''' + Calibrate LDR to HDR response curve from samples +''' inputs = [ desc.File( name='input', label='Input', - description="SfM Data File", + description='SfMData file.', + value='', + uid=[0], + ), + desc.File( + name='response', + label='Response file', + description='Response file', value='', uid=[0], ), + desc.IntParam( + name='offsetRefBracketIndex', + label='Offset Ref Bracket Index', + description='Zero to use the center bracket. 
+N to use a more exposed bracket or -N to use a less exposed backet.', + value=1, + range=(-4, 4, 1), + uid=[0], + ), + desc.ChoiceParam( + name='fusionWeight', + label='Fusion Weight', + description="Weight function used to fuse all LDR images together:\n" + " * gaussian \n" + " * triangle \n" + " * plateau", + value='gaussian', + values=['gaussian', 'triangle', 'plateau'], + exclusive=True, + uid=[0], + ), desc.IntParam( name='userNbBrackets', label='Number of Brackets', - description='Number of exposure brackets per HDR image (0 for automatic).', + description='Number of exposure brackets per HDR image (0 for automatic detection).', value=0, range=(0, 15, 1), uid=[0], @@ -53,6 +83,22 @@ class LDRToHDR(desc.CommandLineNode): value=0, range=(0, 10, 1), uid=[], + ), + desc.BoolParam( + name='byPass', + label='bypass convert', + description="Bypass HDR creation and use the medium bracket as the source for the next steps.", + value=False, + uid=[0], + advanced=True, + ), + desc.IntParam( + name='channelQuantizationPower', + label='Channel Quantization Power', + description='Quantization level like 8 bits or 10 bits.', + value=10, + range=(8, 14, 1), + uid=[0], advanced=True, ), desc.FloatParam( @@ -60,7 +106,7 @@ class LDRToHDR(desc.CommandLineNode): label='Highlights Correction', description='Pixels saturated in all input images have a partial information about their real luminance.\n' 'We only know that the value should be >= to the standard hdr fusion.\n' - 'This parameter allows to perform a post-processing step to put saturated pixels to a constant ' + 'This parameter allows to perform a post-processing step to put saturated pixels to a constant\n' 'value defined by the `highlightsMaxLuminance` parameter.\n' 'This parameter is float to enable to weight this correction.', value=1.0, @@ -89,109 +135,20 @@ class LDRToHDR(desc.CommandLineNode): range=(1000.0, 150000.0, 1.0), uid=[0], ), - desc.BoolParam( - name='fisheyeLens', - label='Fisheye Lens', - 
description="Enable if a fisheye lens has been used.\n " - "This will improve the estimation of the Camera's Response Function by considering only the pixels in the center of the image\n" - "and thus ignore undefined/noisy pixels outside the circle defined by the fisheye lens.", - value=False, - uid=[0], - ), - desc.BoolParam( - name='calibrationRefineExposures', - label='Refine Exposures', - description="Refine exposures provided by metadata (shutter speed, f-number, iso). Only available for 'laguerre' calibration method.", - value=False, - uid=[0], - ), - desc.BoolParam( - name='byPass', - label='bypass convert', - description="Bypass HDR creation and use the medium bracket as the source for the next steps", - value=False, - uid=[0], - ), - desc.ChoiceParam( - name='calibrationMethod', - label='Calibration Method', - description="Method used for camera calibration \n" - " * linear \n" - " * robertson \n" - " * debevec \n" - " * grossberg \n" - " * laguerre", - values=['linear', 'robertson', 'debevec', 'grossberg', 'laguerre'], - value='debevec', - exclusive=True, - uid=[0], - ), - desc.ChoiceParam( - name='calibrationWeight', - label='Calibration Weight', - description="Weight function used to calibrate camera response \n" - " * default (automatically selected according to the calibrationMethod) \n" - " * gaussian \n" - " * triangle \n" - " * plateau", - value='default', - values=['default', 'gaussian', 'triangle', 'plateau'], - exclusive=True, - uid=[0], - ), - desc.ChoiceParam( - name='fusionWeight', - label='Fusion Weight', - description="Weight function used to fuse all LDR images together \n" - " * gaussian \n" - " * triangle \n" - " * plateau", - value='gaussian', - values=['gaussian', 'triangle', 'plateau'], - exclusive=True, - uid=[0], - ), - desc.IntParam( - name='calibrationNbPoints', - label='Calibration Nb Points', - description='Internal number of points used for calibration.', - value=0, - range=(0, 10000000, 1000), - uid=[0], - advanced=True, - ), 
- desc.IntParam( - name='calibrationDownscale', - label='Calibration Downscale', - description='Scaling factor applied to images before calibration of the response function to reduce the impact of misalignment.', - value=4, - range=(1, 16, 1), - uid=[0], - advanced=True, - ), - desc.IntParam( - name='channelQuantizationPower', - label='Channel Quantization Power', - description='Quantization level like 8 bits or 10 bits.', - value=10, - range=(8, 14, 1), - uid=[0], - advanced=True, - ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', - description='Verbosity level (fatal, error, warning, info, debug, trace).', + description='verbosity level (fatal, error, warning, info, debug, trace).', value='info', values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], exclusive=True, uid=[], - ), + ) ] outputs = [ desc.File( - name='outSfMDataFilename', + name='outSfMData', label='Output SfMData File', description='Path to the output sfmdata file', value=desc.Node.internalFolder + 'sfmData.sfm', @@ -211,10 +168,13 @@ def update(cls, node): node.nbBrackets.value = node.userNbBrackets.value return # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion)) - cameraInitOutput = node.input.getLinkParam() + cameraInitOutput = node.input.getLinkParam(recursive=True) if not cameraInitOutput: node.nbBrackets.value = 0 return + if not cameraInitOutput.node.hasAttribute('viewpoints'): + if cameraInitOutput.node.hasAttribute('input'): + cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) viewpoints = cameraInitOutput.node.viewpoints.value # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints))) @@ -226,12 +186,13 @@ def update(cls, node): node.nbBrackets.value = 0 return d = json.loads(jsonMetadata) - fnumber = d.get("FNumber", d.get("Exif:ApertureValue", "")) - shutterSpeed = d.get("Exif:ShutterSpeedValue", "") # also "ExposureTime"? 
- iso = d.get("Exif:ISOSpeedRatings", "") + fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "") + shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "") + iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "") if not fnumber and not shutterSpeed: - # if one image without shutter or fnumber, we cannot found the number of brackets - node.nbBrackets.value = 0 + # If one image without shutter or fnumber, we cannot found the number of brackets. + # We assume that there is no multi-bracketing, so nothing to do. + node.nbBrackets.value = 1 return inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso))) inputs.sort() @@ -247,13 +208,15 @@ def update(cls, node): exposureGroups.append(exposures) exposures = None bracketSizes = set() - for expGroup in exposureGroups: - bracketSizes.add(len(expGroup)) - if len(bracketSizes) == 1: - node.nbBrackets.value = bracketSizes.pop() - # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) + if len(exposureGroups) == 1: + node.nbBrackets.value = 1 else: - node.nbBrackets.value = 0 + for expGroup in exposureGroups: + bracketSizes.add(len(expGroup)) + if len(bracketSizes) == 1: + node.nbBrackets.value = bracketSizes.pop() + # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) + else: + node.nbBrackets.value = 0 # logging.info("[LDRToHDR] Update end") - diff --git a/meshroom/nodes/aliceVision/LdrToHdrSampling.py b/meshroom/nodes/aliceVision/LdrToHdrSampling.py new file mode 100644 index 0000000000..4e9387e3dd --- /dev/null +++ b/meshroom/nodes/aliceVision/LdrToHdrSampling.py @@ -0,0 +1,210 @@ +__version__ = "2.0" + +import json + +from meshroom.core import desc + + +def findMetadata(d, keys, defaultValue): + v = None + for key in keys: + v = d.get(key, None) + k = key.lower() + if v is not None: + return v + for dk, dv in d.iteritems(): + dkm = dk.lower().replace(" ", "") + if 
dkm == key.lower(): + return dv + dkm = dkm.split(":")[-1] + dkm = dkm.split("/")[-1] + if dkm == k: + return dv + return defaultValue + + +class DividedInputNodeSize(desc.DynamicNodeSize): + """ + The LDR2HDR will reduce the amount of views in the SfMData. + This class converts the number of LDR input views into the number of HDR output views. + """ + def __init__(self, param, divParam): + super(DividedInputNodeSize, self).__init__(param) + self._divParam = divParam + def computeSize(self, node): + s = super(DividedInputNodeSize, self).computeSize(node) + divParam = node.attribute(self._divParam) + if divParam.value == 0: + return s + return s / divParam.value + + +class LdrToHdrSampling(desc.CommandLineNode): + commandLine = 'aliceVision_LdrToHdrSampling {allParams}' + size = DividedInputNodeSize('input', 'nbBrackets') + parallelization = desc.Parallelization(blockSize=2) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + documentation = ''' + Sample pixels from Low range images for HDR creation +''' + + inputs = [ + desc.File( + name='input', + label='Input', + description='SfMData file.', + value='', + uid=[0], + ), + desc.IntParam( + name='userNbBrackets', + label='Number of Brackets', + description='Number of exposure brackets per HDR image (0 for automatic detection).', + value=0, + range=(0, 15, 1), + uid=[0], + group='user', # not used directly on the command line + ), + desc.IntParam( + name='nbBrackets', + label='Automatic Nb Brackets', + description='Number of exposure brackets used per HDR image. 
It is detected automatically from input Viewpoints metadata if "userNbBrackets" is 0, else it is equal to "userNbBrackets".', + value=0, + range=(0, 10, 1), + uid=[], + ), + desc.BoolParam( + name='byPass', + label='bypass convert', + description="Bypass HDR creation and use the medium bracket as the source for the next steps", + value=False, + uid=[0], + group='internal', + ), + desc.IntParam( + name='channelQuantizationPower', + label='Channel Quantization Power', + description='Quantization level like 8 bits or 10 bits.', + value=10, + range=(8, 14, 1), + uid=[0], + advanced=True, + ), + desc.IntParam( + name='blockSize', + label='Block Size', + description='Size of the image tile to extract a sample.', + value=256, + range=(8, 1024, 1), + uid=[0], + advanced=True, + ), + desc.IntParam( + name='radius', + label='Patch Radius', + description='Radius of the patch used to analyze the sample statistics.', + value=5, + range=(0, 10, 1), + uid=[0], + advanced=True, + ), + desc.IntParam( + name='maxCountSample', + label='Max Number of Samples', + description='Max number of samples per image group.', + value=200, + range=(10, 1000, 10), + uid=[0], + advanced=True, + ), + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='verbosity level (fatal, error, warning, info, debug, trace).', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[], + ) + ] + + outputs = [ + desc.File( + name='output', + label='Output Folder', + description='Output path for the samples.', + value=desc.Node.internalFolder, + uid=[], + ), + ] + + def processChunk(self, chunk): + if chunk.node.byPass.value: + return + super(LdrToHdrSampling, self).processChunk(chunk) + + @classmethod + def update(cls, node): + if not isinstance(node.nodeDesc, cls): + raise ValueError("Node {} is not an instance of type {}".format(node, cls)) + # TODO: use Node version for this test + if 'userNbBrackets' not in 
node.getAttributes().keys(): + # Old version of the node + return + if node.userNbBrackets.value != 0: + node.nbBrackets.value = node.userNbBrackets.value + return + # logging.info("[LDRToHDR] Update start: version:" + str(node.packageVersion)) + cameraInitOutput = node.input.getLinkParam(recursive=True) + if not cameraInitOutput: + node.nbBrackets.value = 0 + return + if not cameraInitOutput.node.hasAttribute('viewpoints'): + if cameraInitOutput.node.hasAttribute('input'): + cameraInitOutput = cameraInitOutput.node.input.getLinkParam(recursive=True) + viewpoints = cameraInitOutput.node.viewpoints.value + + # logging.info("[LDRToHDR] Update start: nb viewpoints:" + str(len(viewpoints))) + inputs = [] + for viewpoint in viewpoints: + jsonMetadata = viewpoint.metadata.value + if not jsonMetadata: + # no metadata, we cannot found the number of brackets + node.nbBrackets.value = 0 + return + d = json.loads(jsonMetadata) + fnumber = findMetadata(d, ["FNumber", "Exif:ApertureValue", "ApertureValue", "Aperture"], "") + shutterSpeed = findMetadata(d, ["Exif:ShutterSpeedValue", "ShutterSpeedValue", "ShutterSpeed"], "") + iso = findMetadata(d, ["Exif:ISOSpeedRatings", "ISOSpeedRatings", "ISO"], "") + if not fnumber and not shutterSpeed: + # If one image without shutter or fnumber, we cannot found the number of brackets. + # We assume that there is no multi-bracketing, so nothing to do. 
+ node.nbBrackets.value = 1 + return + inputs.append((viewpoint.path.value, (fnumber, shutterSpeed, iso))) + inputs.sort() + + exposureGroups = [] + exposures = [] + for path, exp in inputs: + if exposures and exp != exposures[-1] and exp == exposures[0]: + exposureGroups.append(exposures) + exposures = [exp] + else: + exposures.append(exp) + exposureGroups.append(exposures) + exposures = None + bracketSizes = set() + if len(exposureGroups) == 1: + node.nbBrackets.value = 1 + else: + for expGroup in exposureGroups: + bracketSizes.add(len(expGroup)) + if len(bracketSizes) == 1: + node.nbBrackets.value = bracketSizes.pop() + # logging.info("[LDRToHDR] nb bracket size:" + str(node.nbBrackets.value)) + else: + node.nbBrackets.value = 0 + # logging.info("[LDRToHDR] Update end") + diff --git a/meshroom/nodes/aliceVision/MeshDecimate.py b/meshroom/nodes/aliceVision/MeshDecimate.py index 280e9319a4..8b928350b0 100644 --- a/meshroom/nodes/aliceVision/MeshDecimate.py +++ b/meshroom/nodes/aliceVision/MeshDecimate.py @@ -9,6 +9,10 @@ class MeshDecimate(desc.CommandLineNode): cpu = desc.Level.NORMAL ram = desc.Level.NORMAL + documentation = ''' +This node allows to reduce the density of the Mesh. +''' + inputs = [ desc.File( name="input", diff --git a/meshroom/nodes/aliceVision/MeshDenoising.py b/meshroom/nodes/aliceVision/MeshDenoising.py index 807dc7dfa7..1b6863edd5 100644 --- a/meshroom/nodes/aliceVision/MeshDenoising.py +++ b/meshroom/nodes/aliceVision/MeshDenoising.py @@ -6,6 +6,11 @@ class MeshDenoising(desc.CommandLineNode): commandLine = 'aliceVision_meshDenoising {allParams}' + documentation = ''' +This experimental node allows to reduce noise from a Mesh. +for now, the parameters are difficult to control and vary a lot from one dataset to another. 
+''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/MeshFiltering.py b/meshroom/nodes/aliceVision/MeshFiltering.py index 4a1ae7ea41..deddc28ff2 100644 --- a/meshroom/nodes/aliceVision/MeshFiltering.py +++ b/meshroom/nodes/aliceVision/MeshFiltering.py @@ -6,6 +6,11 @@ class MeshFiltering(desc.CommandLineNode): commandLine = 'aliceVision_meshFiltering {allParams}' + documentation = ''' +This node applies a Laplacian filtering to remove local defects from the raw Meshing cut. + +''' + inputs = [ desc.File( name='inputMesh', diff --git a/meshroom/nodes/aliceVision/MeshResampling.py b/meshroom/nodes/aliceVision/MeshResampling.py index e6966366a3..64c4cab717 100644 --- a/meshroom/nodes/aliceVision/MeshResampling.py +++ b/meshroom/nodes/aliceVision/MeshResampling.py @@ -9,6 +9,10 @@ class MeshResampling(desc.CommandLineNode): cpu = desc.Level.NORMAL ram = desc.Level.NORMAL + documentation = ''' +This node allows to recompute the mesh surface with a new topology and uniform density. +''' + inputs = [ desc.File( name="input", diff --git a/meshroom/nodes/aliceVision/Meshing.py b/meshroom/nodes/aliceVision/Meshing.py index 3b6c478fff..e46bffbf29 100644 --- a/meshroom/nodes/aliceVision/Meshing.py +++ b/meshroom/nodes/aliceVision/Meshing.py @@ -9,6 +9,17 @@ class Meshing(desc.CommandLineNode): cpu = desc.Level.INTENSIVE ram = desc.Level.INTENSIVE + documentation = ''' +This node creates a dense geometric surface representation of the scene. + +First, it fuses all the depth maps into a global dense point cloud with an adaptive resolution. +It then performs a 3D Delaunay tetrahedralization and a voting procedure is done to compute weights on cells and weights on facets connecting the cells. +A Graph Cut Max-Flow is applied to optimally cut the volume. This cut represents the extracted mesh surface. 
+ +## Online +[https://alicevision.org/#photogrammetry/meshing](https://alicevision.org/#photogrammetry/meshing) +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/PanoramaCompositing.py b/meshroom/nodes/aliceVision/PanoramaCompositing.py index 34af53ad70..5b34fc247a 100644 --- a/meshroom/nodes/aliceVision/PanoramaCompositing.py +++ b/meshroom/nodes/aliceVision/PanoramaCompositing.py @@ -10,11 +10,25 @@ class PanoramaCompositing(desc.CommandLineNode): commandLine = 'aliceVision_panoramaCompositing {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +Once the images have been transformed geometrically (in PanoramaWarping), +they have to be fused together in a single panorama image which looks like a single photograph. +The Multi-band Blending method provides the best quality. It averages the pixel values using multiple bands in the frequency domain. +Multiple cameras are contributing to the low frequencies and only the best one contributes to the high frequencies. 
+''' + inputs = [ desc.File( name='input', - label='Input', - description="Panorama Warping result", + label='Input SfMData', + description="Input SfMData.", + value='', + uid=[0], + ), + desc.File( + name='warpingFolder', + label='Warping Folder', + description="Panorama Warping results", value='', uid=[0], ), @@ -31,12 +45,28 @@ class PanoramaCompositing(desc.CommandLineNode): desc.ChoiceParam( name='compositerType', label='Compositer Type', - description='Which compositer should be used to blend images', + description='Which compositer should be used to blend images:\n' + ' * multiband: high quality transition by fusing images by frequency bands\n' + ' * replace: debug option with straight transitions\n' + ' * alpha: debug option with linear transitions\n', value='multiband', values=['replace', 'alpha', 'multiband'], exclusive=True, uid=[0] ), + desc.ChoiceParam( + name='overlayType', + label='Overlay Type', + description='Overlay on top of panorama to analyze transitions:\n' + ' * none: no overlay\n' + ' * borders: display image borders\n' + ' * seams: display transitions between images\n', + value='none', + values=['none', 'borders', 'seams'], + exclusive=True, + advanced=True, + uid=[0] + ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', diff --git a/meshroom/nodes/aliceVision/PanoramaEstimation.py b/meshroom/nodes/aliceVision/PanoramaEstimation.py index 6aaff58f36..23dac100cc 100644 --- a/meshroom/nodes/aliceVision/PanoramaEstimation.py +++ b/meshroom/nodes/aliceVision/PanoramaEstimation.py @@ -10,6 +10,10 @@ class PanoramaEstimation(desc.CommandLineNode): commandLine = 'aliceVision_panoramaEstimation {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +Estimate relative camera rotations between input images. 
+''' + inputs = [ desc.File( name='input', @@ -53,15 +57,6 @@ class PanoramaEstimation(desc.CommandLineNode): uid=[0], joinChar=',', ), - desc.IntParam( - name='orientation', - label='Orientation', - description='Orientation', - value=0, - range=(0, 6, 1), - uid=[0], - advanced=True, - ), desc.FloatParam( name='offsetLongitude', label='Longitude offset (deg.)', @@ -69,7 +64,6 @@ class PanoramaEstimation(desc.CommandLineNode): value=0.0, range=(-180.0, 180.0, 1.0), uid=[0], - advanced=True, ), desc.FloatParam( name='offsetLatitude', @@ -78,7 +72,6 @@ class PanoramaEstimation(desc.CommandLineNode): value=0.0, range=(-90.0, 90.0, 1.0), uid=[0], - advanced=True, ), desc.ChoiceParam( name='rotationAveraging', @@ -97,9 +90,10 @@ class PanoramaEstimation(desc.CommandLineNode): label='Relative Rotation Method', description="Method for relative rotation :\n" " * from essential matrix\n" - " * from homography matrix", - values=['essential_matrix', 'homography_matrix'], - value='homography_matrix', + " * from homography matrix\n" + " * from rotation matrix", + values=['essential_matrix', 'homography_matrix', 'rotation_matrix'], + value='rotation_matrix', exclusive=True, uid=[0], advanced=True, @@ -113,13 +107,47 @@ class PanoramaEstimation(desc.CommandLineNode): ), desc.BoolParam( name='lockAllIntrinsics', - label='Force Lock of All Intrinsic Camera Parameters.', + label='Force Lock of All Intrinsics', description='Force to keep constant all the intrinsics parameters of the cameras (focal length, \n' 'principal point, distortion if any) during the reconstruction.\n' 'This may be helpful if the input cameras are already fully calibrated.', value=False, uid=[0], ), + desc.FloatParam( + name='maxAngleToPrior', + label='Max Angle To Priors (deg.)', + description='''Maximal angle allowed regarding the input prior (in degrees).''', + value=20.0, + range=(0.0, 360.0, 1.0), + uid=[0], + advanced=True, + ), + desc.FloatParam( + name='maxAngularError', + label='Max Angular Error 
(deg.)', + description='''Maximal angular error in global rotation averging (in degrees).''', + value=100.0, + range=(0.0, 360.0, 1.0), + uid=[0], + advanced=True, + ), + desc.BoolParam( + name='intermediateRefineWithFocal', + label='Intermediate Refine: Focal', + description='Intermediate refine with rotation and focal length only.', + value=False, + uid=[0], + advanced=True, + ), + desc.BoolParam( + name='intermediateRefineWithFocalDist', + label='Intermediate Refine: Focal And Distortion', + description='Intermediate refine with rotation, focal length and distortion.', + value=False, + uid=[0], + advanced=True, + ), desc.ChoiceParam( name='verboseLevel', label='Verbose Level', @@ -134,16 +162,16 @@ class PanoramaEstimation(desc.CommandLineNode): outputs = [ desc.File( name='output', - label='Output Folder', - description='', - value=desc.Node.internalFolder, + label='Output SfMData File', + description='Path to the output sfmdata file', + value=desc.Node.internalFolder + 'panorama.abc', uid=[], ), desc.File( - name='outSfMDataFilename', - label='Output SfMData File', - description='Path to the output sfmdata file', - value=desc.Node.internalFolder + 'sfmData.abc', + name='outputViewsAndPoses', + label='Output Poses', + description='''Path to the output sfmdata file with cameras (views and poses).''', + value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ] diff --git a/meshroom/nodes/aliceVision/PanoramaExternalInfo.py b/meshroom/nodes/aliceVision/PanoramaExternalInfo.py deleted file mode 100644 index 4fca9880ad..0000000000 --- a/meshroom/nodes/aliceVision/PanoramaExternalInfo.py +++ /dev/null @@ -1,60 +0,0 @@ -__version__ = "1.0" - -import json -import os - -from meshroom.core import desc - - -class PanoramaExternalInfo(desc.CommandLineNode): - commandLine = 'aliceVision_panoramaExternalInfo {allParams}' - size = desc.DynamicNodeSize('input') - - inputs = [ - desc.File( - name='input', - label='Input', - description="SfM Data File", - value='', - uid=[0], 
- ), - desc.File( - name='config', - label='Xml Config', - description="XML Data File", - value='', - uid=[0], - ), - desc.ListAttribute( - elementDesc=desc.File( - name='matchesFolder', - label='Matches Folder', - description="", - value='', - uid=[0], - ), - name='matchesFolders', - label='Matches Folders', - description="Folder(s) in which computed matches are stored. (WORKAROUND for valid Tractor graph submission)", - group='forDependencyOnly', - ), - desc.ChoiceParam( - name='verboseLevel', - label='Verbose Level', - description='Verbosity level (fatal, error, warning, info, debug, trace).', - value='info', - values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], - exclusive=True, - uid=[], - ), - ] - - outputs = [ - desc.File( - name='outSfMDataFilename', - label='Output SfMData File', - description='Path to the output sfmdata file', - value=desc.Node.internalFolder + 'sfmData.abc', - uid=[], - ) - ] diff --git a/meshroom/nodes/aliceVision/PanoramaInit.py b/meshroom/nodes/aliceVision/PanoramaInit.py new file mode 100644 index 0000000000..9abcdcd871 --- /dev/null +++ b/meshroom/nodes/aliceVision/PanoramaInit.py @@ -0,0 +1,109 @@ +__version__ = "1.0" + +from meshroom.core import desc + + +class PanoramaInit(desc.CommandLineNode): + commandLine = 'aliceVision_panoramaInit {allParams}' + size = desc.DynamicNodeSize('input') + + documentation = ''' +This node allows to setup the Panorama: + +1/ Enables the initialization the cameras from known position in an XML file (provided by +["Roundshot VR Drive"](https://www.roundshot.com/xml_1/internet/fr/application/d394/d395/f396.cfm) ). + +2/ Enables to setup Full Fisheye Optics (to use an Equirectangular camera model). + +3/ To automatically detects the Fisheye Circle (radius + center) in input images or manually adjust it. 
+ +''' + + inputs = [ + desc.File( + name='input', + label='Input', + description="SfM Data File", + value='', + uid=[0], + ), + desc.File( + name='config', + label='Xml Config', + description="XML Data File", + value='', + uid=[0], + ), + desc.ListAttribute( + elementDesc=desc.File( + name='dependency', + label='', + description="", + value='', + uid=[], + ), + name='dependency', + label='Dependency', + description="Folder(s) in which computed features are stored. (WORKAROUND for valid Tractor graph submission)", + group='forDependencyOnly', # not a command line argument + ), + desc.BoolParam( + name='useFisheye', + label='Full Fisheye', + description='To declare a full fisheye panorama setup', + value=False, + uid=[0], + ), + desc.BoolParam( + name='estimateFisheyeCircle', + label='Estimate Fisheye Circle', + description='Automatically estimate the Fisheye Circle center and radius instead of using user values.', + value=True, + uid=[0], + ), + desc.GroupAttribute( + name="fisheyeCenterOffset", + label="Fisheye Center", + description="Center of the Fisheye circle (XY offset to the center in pixels).", + groupDesc=[ + desc.FloatParam( + name="fisheyeCenterOffset_x", label="x", description="X Offset in pixels", + value=0.0, + uid=[0], + range=(-1000.0, 10000.0, 1.0)), + desc.FloatParam( + name="fisheyeCenterOffset_y", label="y", description="Y Offset in pixels", + value=0.0, + uid=[0], + range=(-1000.0, 10000.0, 1.0)), + ], + group=None, # skip group from command line + ), + desc.FloatParam( + name='fisheyeRadius', + label='Radius', + description='Fisheye visibillity circle radius (% of image shortest side).', + value=96.0, + range=(0.0, 150.0, 0.01), + uid=[0], + ), + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='Verbosity level (fatal, error, warning, info, debug, trace).', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[], + ), + ] + + outputs = [ + desc.File( + 
name='outSfMData', + label='Output SfMData File', + description='Path to the output sfmdata file', + value=desc.Node.internalFolder + 'sfmData.sfm', + uid=[], + ) + ] diff --git a/meshroom/nodes/aliceVision/PanoramaPrepareImages.py b/meshroom/nodes/aliceVision/PanoramaPrepareImages.py new file mode 100644 index 0000000000..67a6357bb3 --- /dev/null +++ b/meshroom/nodes/aliceVision/PanoramaPrepareImages.py @@ -0,0 +1,43 @@ +__version__ = "1.1" + +from meshroom.core import desc + +import os.path + + +class PanoramaPrepareImages(desc.CommandLineNode): + commandLine = 'aliceVision_panoramaPrepareImages {allParams}' + size = desc.DynamicNodeSize('input') + + documentation = ''' +Prepare images for Panorama pipeline: ensures that images orientations are coherent. +''' + + inputs = [ + desc.File( + name='input', + label='Input', + description='SfMData file.', + value='', + uid=[0], + ), + desc.ChoiceParam( + name='verboseLevel', + label='Verbose Level', + description='verbosity level (fatal, error, warning, info, debug, trace).', + value='info', + values=['fatal', 'error', 'warning', 'info', 'debug', 'trace'], + exclusive=True, + uid=[], + ) + ] + + outputs = [ + desc.File( + name='output', + label='Output sfmData', + description='Output sfmData.', + value=lambda attr: desc.Node.internalFolder + os.path.basename(attr.node.input.value), + uid=[], + ), + ] diff --git a/meshroom/nodes/aliceVision/PanoramaWarping.py b/meshroom/nodes/aliceVision/PanoramaWarping.py index a127fe3524..7cba255f74 100644 --- a/meshroom/nodes/aliceVision/PanoramaWarping.py +++ b/meshroom/nodes/aliceVision/PanoramaWarping.py @@ -10,6 +10,13 @@ class PanoramaWarping(desc.CommandLineNode): commandLine = 'aliceVision_panoramaWarping {allParams}' size = desc.DynamicNodeSize('input') + parallelization = desc.Parallelization(blockSize=5) + commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + + documentation = ''' +Compute the image warping for each input image in the panorama 
coordinate system. +''' + inputs = [ desc.File( name='input', @@ -21,7 +28,8 @@ class PanoramaWarping(desc.CommandLineNode): desc.IntParam( name='panoramaWidth', label='Panorama Width', - description='Panorama width (pixels). 0 For automatic size', + description='Panorama Width (in pixels).\n' + 'Set 0 to let the software choose the size automatically, so that on average the input resolution is kept (to limit over/under sampling).', value=10000, range=(0, 50000, 1000), uid=[0] diff --git a/meshroom/nodes/aliceVision/PrepareDenseScene.py b/meshroom/nodes/aliceVision/PrepareDenseScene.py index 5467d576ab..afd5b4b27d 100644 --- a/meshroom/nodes/aliceVision/PrepareDenseScene.py +++ b/meshroom/nodes/aliceVision/PrepareDenseScene.py @@ -9,6 +9,10 @@ class PrepareDenseScene(desc.CommandLineNode): parallelization = desc.Parallelization(blockSize=40) commandLineRange = '--rangeStart {rangeStart} --rangeSize {rangeBlockSize}' + documentation = ''' +This node exports undistorted images so the depth map and texturing can be computed on Pinhole images without distortion. +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/nodes/aliceVision/Publish.py b/meshroom/nodes/aliceVision/Publish.py index 447bd65d53..556499f9e4 100644 --- a/meshroom/nodes/aliceVision/Publish.py +++ b/meshroom/nodes/aliceVision/Publish.py @@ -10,6 +10,11 @@ class Publish(desc.Node): size = desc.DynamicNodeSize('inputFiles') + + documentation = ''' +This node allows to copy files into a specific folder. 
+''' + inputs = [ desc.ListAttribute( elementDesc=desc.File( diff --git a/meshroom/nodes/aliceVision/SfMAlignment.py b/meshroom/nodes/aliceVision/SfMAlignment.py index 798ce1c59c..0b21005175 100644 --- a/meshroom/nodes/aliceVision/SfMAlignment.py +++ b/meshroom/nodes/aliceVision/SfMAlignment.py @@ -1,12 +1,26 @@ -__version__ = "1.0" +__version__ = "2.0" from meshroom.core import desc +import os.path + class SfMAlignment(desc.CommandLineNode): commandLine = 'aliceVision_utils_sfmAlignment {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +This node allows to change the coordinate system of one SfM scene to align it on another one. + +The alignment can be based on: + * from_cameras_viewid: Align cameras in both SfM on the specified viewId + * from_cameras_poseid: Align cameras in both SfM on the specified poseId + * from_cameras_filepath: Align cameras with a filepath matching, using 'fileMatchingPattern' + * from_cameras_metadata: Align cameras with matching metadata, using 'metadataMatchingList' + * from_markers: Align from markers with the same Id + +''' + inputs = [ desc.File( name='input', @@ -95,9 +109,16 @@ class SfMAlignment(desc.CommandLineNode): outputs = [ desc.File( name='output', - label='Output', - description='''Aligned SfMData file .''', - value=desc.Node.internalFolder + 'alignedSfM.abc', + label='Output SfMData File', + description='SfMData file.', + value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc', + uid=[], + ), + desc.File( + name='outputViewsAndPoses', + label='Output Poses', + description='''Path to the output sfmdata file with cameras (views and poses).''', + value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ] diff --git a/meshroom/nodes/aliceVision/SfMTransfer.py b/meshroom/nodes/aliceVision/SfMTransfer.py index a30695ca26..3a112d52d6 100644 --- a/meshroom/nodes/aliceVision/SfMTransfer.py +++ 
b/meshroom/nodes/aliceVision/SfMTransfer.py @@ -1,12 +1,18 @@ -__version__ = "1.0" +__version__ = "2.0" from meshroom.core import desc +import os.path + class SfMTransfer(desc.CommandLineNode): commandLine = 'aliceVision_utils_sfmTransfer {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +This node allows to transfer poses and/or intrinsics from one SfM scene onto another one. +''' + inputs = [ desc.File( name='input', @@ -86,9 +92,16 @@ class SfMTransfer(desc.CommandLineNode): outputs = [ desc.File( name='output', - label='Output', + label='Output SfMData File', description='SfMData file.', - value=desc.Node.internalFolder + 'sfmData.abc', + value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc', + uid=[], + ), + desc.File( + name='outputViewsAndPoses', + label='Output Poses', + description='''Path to the output sfmdata file with cameras (views and poses).''', + value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ] diff --git a/meshroom/nodes/aliceVision/SfMTransform.py b/meshroom/nodes/aliceVision/SfMTransform.py index 6a9a975a84..f0b1c61f54 100644 --- a/meshroom/nodes/aliceVision/SfMTransform.py +++ b/meshroom/nodes/aliceVision/SfMTransform.py @@ -1,12 +1,26 @@ -__version__ = "1.1" +__version__ = "2.0" from meshroom.core import desc +import os.path + class SfMTransform(desc.CommandLineNode): commandLine = 'aliceVision_utils_sfmTransform {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +This node allows to change the coordinate system of one SfM scene. 
+ +The transformation can be based on: + * transformation: Apply a given transformation + * auto_from_cameras: Fit all cameras into a box [-1,1] + * auto_from_landmarks: Fit all landmarks into a box [-1,1] + * from_single_camera: Use a specific camera as the origin of the coordinate system + * from_markers: Align specific markers to custom coordinates + +''' + inputs = [ desc.File( name='input', @@ -104,9 +118,16 @@ class SfMTransform(desc.CommandLineNode): outputs = [ desc.File( name='output', - label='Output', + label='Output SfMData File', description='''Aligned SfMData file .''', - value=desc.Node.internalFolder + 'transformedSfM.abc', + value=lambda attr: desc.Node.internalFolder + (os.path.splitext(os.path.basename(attr.node.input.value))[0] or 'sfmData') + '.abc', + uid=[], + ), + desc.File( + name='outputViewsAndPoses', + label='Output Poses', + description='''Path to the output sfmdata file with cameras (views and poses).''', + value=desc.Node.internalFolder + 'cameras.sfm', uid=[], ), ] diff --git a/meshroom/nodes/aliceVision/SketchfabUpload.py b/meshroom/nodes/aliceVision/SketchfabUpload.py index 27bea2fb28..06571b0f5c 100644 --- a/meshroom/nodes/aliceVision/SketchfabUpload.py +++ b/meshroom/nodes/aliceVision/SketchfabUpload.py @@ -51,6 +51,11 @@ def progressUpdate(size=None, progress=None, logManager=None): class SketchfabUpload(desc.Node): size = desc.DynamicNodeSize('inputFiles') + + documentation = ''' +Upload a textured mesh on Sketchfab. 
+''' + inputs = [ desc.ListAttribute( elementDesc=desc.File( diff --git a/meshroom/nodes/aliceVision/StructureFromMotion.py b/meshroom/nodes/aliceVision/StructureFromMotion.py index 91d257f2a5..685aa94534 100644 --- a/meshroom/nodes/aliceVision/StructureFromMotion.py +++ b/meshroom/nodes/aliceVision/StructureFromMotion.py @@ -1,8 +1,5 @@ __version__ = "2.0" -import json -import os - from meshroom.core import desc @@ -10,6 +7,59 @@ class StructureFromMotion(desc.CommandLineNode): commandLine = 'aliceVision_incrementalSfM {allParams}' size = desc.DynamicNodeSize('input') + documentation = ''' +This node will analyze feature matches to understand the geometric relationship behind all the 2D observations, +and infer the rigid scene structure (3D points) with the pose (position and orientation) and internal calibration of all cameras. +The pipeline is a growing reconstruction process (called incremental SfM): it first computes an initial two-view reconstruction that is iteratively extended by adding new views. + +1/ Fuse 2-View Matches into Tracks + +It fuses all feature matches between image pairs into tracks. Each track represents a candidate point in space, visible from multiple cameras. +However, at this step of the pipeline, it still contains many outliers. + +2/ Initial Image Pair + +It chooses the best initial image pair. This choice is critical for the quality of the final reconstruction. +It should indeed provide robust matches and contain reliable geometric information. +So, this image pair should maximize the number of matches and the repartition of the corresponding features in each image. +But at the same time, the angle between the cameras should also be large enough to provide reliable geometric information. + +3/ Initial 2-View Geometry + +It computes the fundamental matrix between the 2 images selected and consider that the first one is the origin of the coordinate system. 
+ +4/ Triangulate + +Now with the pose of the 2 first cameras, it triangulates the corresponding 2D features into 3D points. + +5/ Next Best View Selection + +After that, it selects all the images that have enough associations with the features that are already reconstructed in 3D. + +6/ Estimate New Cameras + +Based on these 2D-3D associations it performs the resectioning of each of these new cameras. +The resectioning is a Perspective-n-Point algorithm (PnP) in a RANSAC framework to find the pose of the camera that validates most of the features associations. +On each camera, a non-linear minimization is performed to refine the pose. + +7/ Triangulate + +From these new cameras poses, some tracks become visible by 2 or more resected cameras and it triangulates them. + +8/ Optimize + +It performs a Bundle Adjustment to refine everything: extrinsics and intrinsics parameters of all cameras as well as the position of all 3D points. +It filters the results of the Bundle Adjustment by removing all observations that have high reprojection error or insufficient angles between observations. + +9/ Loop from 5 to 9 + +As we have triangulated new points, we get more image candidates for next best views selection and we can iterate from 5 to 9. +It iterates like that, adding cameras and triangulating new 2D features into 3D points and removing 3D points that became invalidated, until we cannot localize new views. 
+ +## Online +[https://alicevision.org/#photogrammetry/sfm](https://alicevision.org/#photogrammetry/sfm) +''' + inputs = [ desc.File( name='input', @@ -281,7 +331,7 @@ class StructureFromMotion(desc.CommandLineNode): ), desc.File( name='outputViewsAndPoses', - label='Output SfMData File', + label='Output Poses', description='''Path to the output sfmdata file with cameras (views and poses).''', value=desc.Node.internalFolder + 'cameras.sfm', uid=[], @@ -294,30 +344,3 @@ class StructureFromMotion(desc.CommandLineNode): uid=[], ), ] - - @staticmethod - def getResults(node): - """ - Parse SfM result and return views, poses and intrinsics as three dicts with viewId, poseId and intrinsicId as keys. - """ - reportFile = node.outputViewsAndPoses.value - if not os.path.exists(reportFile): - return {}, {}, {} - - with open(reportFile) as jsonFile: - report = json.load(jsonFile) - - views = dict() - poses = dict() - intrinsics = dict() - - for view in report['views']: - views[view['viewId']] = view - - for pose in report['poses']: - poses[pose['poseId']] = pose['pose'] - - for intrinsic in report['intrinsics']: - intrinsics[intrinsic['intrinsicId']] = intrinsic - - return views, poses, intrinsics diff --git a/meshroom/nodes/aliceVision/Texturing.py b/meshroom/nodes/aliceVision/Texturing.py index 117201a82f..1e36756f06 100644 --- a/meshroom/nodes/aliceVision/Texturing.py +++ b/meshroom/nodes/aliceVision/Texturing.py @@ -7,6 +7,20 @@ class Texturing(desc.CommandLineNode): commandLine = 'aliceVision_texturing {allParams}' cpu = desc.Level.INTENSIVE ram = desc.Level.INTENSIVE + + documentation = ''' +This node computes the texturing on the mesh. + +If the mesh has no associated UV, it automatically computes UV maps. + +For each triangle, it uses the visibility information associated to each vertex to retrieve the texture candidates. +It select the best cameras based on the resolution covering the triangle. 
Finally it averages the pixel values using multiple bands in the frequency domain. +Many cameras are contributing to the low frequencies and only the best ones contributes to the high frequencies. + +## Online +[https://alicevision.org/#photogrammetry/texturing](https://alicevision.org/#photogrammetry/texturing) +''' + inputs = [ desc.File( name='input', diff --git a/meshroom/ui/app.py b/meshroom/ui/app.py index 25d446f9be..5abe029c53 100644 --- a/meshroom/ui/app.py +++ b/meshroom/ui/app.py @@ -8,6 +8,8 @@ import meshroom from meshroom.core import nodesDesc +from meshroom.core import pyCompatibility + from meshroom.ui import components from meshroom.ui.components.clipboard import ClipboardHelper from meshroom.ui.components.filepath import FilepathHelper @@ -183,8 +185,19 @@ def _recentProjectFiles(self): return projects @Slot(str) + @Slot(QUrl) def addRecentProjectFile(self, projectFile): - projectFile = QUrl(projectFile).toLocalFile() + if not isinstance(projectFile, (QUrl, pyCompatibility.basestring)): + raise TypeError("Unexpected data type: {}".format(projectFile.__class__)) + if isinstance(projectFile, QUrl): + projectFileNorm = projectFile.toLocalFile() + if not projectFileNorm: + projectFileNorm = projectFile.toString() + else: + projectFileNorm = QUrl(projectFile).toLocalFile() + if not projectFileNorm: + projectFileNorm = QUrl.fromLocalFile(projectFile).toLocalFile() + projects = self._recentProjectFiles() # remove duplicates while preserving order @@ -192,10 +205,10 @@ def addRecentProjectFile(self, projectFile): uniqueProjects = OrderedDict.fromkeys(projects) projects = list(uniqueProjects) # remove previous usage of the value - if projectFile in uniqueProjects: - projects.remove(projectFile) + if projectFileNorm in uniqueProjects: + projects.remove(projectFileNorm) # add the new value in the first place - projects.insert(0, projectFile) + projects.insert(0, projectFileNorm) # keep only the 10 first elements projects = projects[0:20] @@ -211,6 +224,43 @@ 
def addRecentProjectFile(self, projectFile): self.recentProjectFilesChanged.emit() + @Slot(str) + @Slot(QUrl) + def removeRecentProjectFile(self, projectFile): + if not isinstance(projectFile, (QUrl, pyCompatibility.basestring)): + raise TypeError("Unexpected data type: {}".format(projectFile.__class__)) + if isinstance(projectFile, QUrl): + projectFileNorm = projectFile.toLocalFile() + if not projectFileNorm: + projectFileNorm = projectFile.toString() + else: + projectFileNorm = QUrl(projectFile).toLocalFile() + if not projectFileNorm: + projectFileNorm = QUrl.fromLocalFile(projectFile).toLocalFile() + + projects = self._recentProjectFiles() + + # remove duplicates while preserving order + from collections import OrderedDict + uniqueProjects = OrderedDict.fromkeys(projects) + projects = list(uniqueProjects) + # remove previous usage of the value + if projectFileNorm not in uniqueProjects: + return + + projects.remove(projectFileNorm) + + settings = QSettings() + settings.beginGroup("RecentFiles") + size = settings.beginWriteArray("Projects") + for i, p in enumerate(projects): + settings.setArrayIndex(i) + settings.setValue("filepath", p) + settings.endArray() + settings.sync() + + self.recentProjectFilesChanged.emit() + @Slot(str, result=str) def markdownToHtml(self, md): """ diff --git a/meshroom/ui/graph.py b/meshroom/ui/graph.py index 566c122c58..f6fb501d5f 100644 --- a/meshroom/ui/graph.py +++ b/meshroom/ui/graph.py @@ -309,16 +309,14 @@ def stopChildThreads(self): self.stopExecution() self._chunksMonitor.stop() - def load(self, filepath, setupProjectFile=True): + @Slot(str, result=bool) + def loadGraph(self, filepath, setupProjectFile=True): g = Graph('') - g.load(filepath, setupProjectFile) + status = g.load(filepath, setupProjectFile) if not os.path.exists(g.cacheDir): os.mkdir(g.cacheDir) self.setGraph(g) - - @Slot(QUrl) - def loadUrl(self, url): - self.load(url.toLocalFile()) + return status @Slot(QUrl) def saveAs(self, url): diff --git 
a/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml b/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml index 408a4823a4..5aee77bd6e 100644 --- a/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml +++ b/meshroom/ui/qml/GraphEditor/AttributeItemDelegate.qml @@ -235,6 +235,10 @@ RowLayout { property string displayValue: String(slider.active && slider.item.pressed ? slider.item.formattedValue : attribute.value) text: displayValue selectByMouse: true + // Note: Use autoScroll as a workaround for alignment + // When the value change keep the text align to the left to be able to read the most important part + // of the number. When we are editing (item is in focus), the content should follow the editing. + autoScroll: activeFocus validator: attribute.type == "FloatParam" ? doubleValidator : intValidator onEditingFinished: setTextFieldAttribute(text) onAccepted: { diff --git a/meshroom/ui/qml/GraphEditor/NodeDocumentation.qml b/meshroom/ui/qml/GraphEditor/NodeDocumentation.qml new file mode 100644 index 0000000000..337f3e3711 --- /dev/null +++ b/meshroom/ui/qml/GraphEditor/NodeDocumentation.qml @@ -0,0 +1,38 @@ +import QtQuick 2.11 +import QtQuick.Controls 2.3 +import QtQuick.Layouts 1.3 +import Controls 1.0 + +import "common.js" as Common + +/** + * Displays Node documentation + */ +FocusScope { + id: root + + property variant node + + SystemPalette { id: activePalette } + + ScrollView { + width: parent.width + height: parent.height + ScrollBar.vertical.policy: ScrollBar.AlwaysOn + ScrollBar.horizontal.policy: ScrollBar.AlwaysOff + clip: true + + TextEdit { + width: parent.parent.width + height: parent.height + + padding: 8 + textFormat: TextEdit.MarkdownText + selectByMouse: true + selectionColor: activePalette.highlight + color: activePalette.text + text: node.documentation + wrapMode: TextEdit.Wrap + } + } +} diff --git a/meshroom/ui/qml/GraphEditor/NodeEditor.qml b/meshroom/ui/qml/GraphEditor/NodeEditor.qml index b0d1041d24..4cc96ae6d9 100644 --- 
a/meshroom/ui/qml/GraphEditor/NodeEditor.qml +++ b/meshroom/ui/qml/GraphEditor/NodeEditor.qml @@ -148,6 +148,12 @@ Panel { chunkCurrentIndex: m.chunkCurrentIndex onChangeCurrentChunk: { m.chunkCurrentIndex = chunkIndex } } + + NodeDocumentation { + id: nodeDocumentation + Layout.fillWidth: true + node: root.node + } } } } @@ -185,6 +191,12 @@ Panel { leftPadding: 8 rightPadding: leftPadding } + TabButton { + text: "Documentation" + width: implicitWidth + leftPadding: 8 + rightPadding: leftPadding + } } } } diff --git a/meshroom/ui/qml/ImageGallery/ImageDelegate.qml b/meshroom/ui/qml/ImageGallery/ImageDelegate.qml index 9100216f63..bc5beb116b 100644 --- a/meshroom/ui/qml/ImageGallery/ImageDelegate.qml +++ b/meshroom/ui/qml/ImageGallery/ImageDelegate.qml @@ -56,6 +56,12 @@ Item { enabled: !root.readOnly onClicked: removeRequest() } + MenuItem { + text: "Define As Center Image" + property var activeNode: _reconstruction.activeNodes.get("SfMTransform").node + enabled: !root.readOnly && _viewpoint.viewId != -1 && _reconstruction && activeNode + onClicked: activeNode.attribute("transformation").value = _viewpoint.viewId.toString() + } } ColumnLayout { diff --git a/meshroom/ui/qml/ImageGallery/ImageGallery.qml b/meshroom/ui/qml/ImageGallery/ImageGallery.qml index d4dcd759ce..3029a2411c 100644 --- a/meshroom/ui/qml/ImageGallery/ImageGallery.qml +++ b/meshroom/ui/qml/ImageGallery/ImageGallery.qml @@ -16,10 +16,12 @@ Panel { property variant cameraInits property variant cameraInit - property variant hdrCameraInit + property variant tempCameraInit readonly property alias currentItem: grid.currentItem readonly property string currentItemSource: grid.currentItem ? grid.currentItem.source : "" readonly property var currentItemMetadata: grid.currentItem ? grid.currentItem.metadata : undefined + readonly property int centerViewId: (_reconstruction && _reconstruction.sfmTransform) ? 
parseInt(_reconstruction.sfmTransform.attribute("transformation").value) : 0 + property int defaultCellSize: 160 property int currentIndex: 0 property bool readOnly: false @@ -36,7 +38,7 @@ Panel { QtObject { id: m - property variant currentCameraInit: displayHDR.checked ? _reconstruction.hdrCameraInit : root.cameraInit + property variant currentCameraInit: _reconstruction.tempCameraInit ? _reconstruction.tempCameraInit : root.cameraInit property variant viewpoints: currentCameraInit ? currentCameraInit.attribute('viewpoints').value : undefined property bool readOnly: root.readOnly || displayHDR.checked } @@ -189,6 +191,16 @@ Panel { } } + // Center of SfMTransform + Loader { + id: sfmTransformIndicator + active: viewpoint && (viewpoint.get("viewId").value == centerViewId) + sourceComponent: ImageBadge { + text: MaterialIcons.gamepad + ToolTip.text: "Camera used to define the center of the scene." + } + } + Item { Layout.fillWidth: true } // Reconstruction status indicator @@ -331,47 +343,114 @@ Panel { } footerContent: RowLayout { + // Images count + MaterialToolLabel { + ToolTip.text: grid.model.count + " Input Images" + iconText: MaterialIcons.image + label: grid.model.count.toString() + // enabled: grid.model.count > 0 + // margin: 4 + } + // cameras count + MaterialToolLabel { + ToolTip.text: label + " Estimated Cameras" + iconText: MaterialIcons.videocam + label: _reconstruction ? _reconstruction.nbCameras.toString() : "0" + // margin: 4 + // enabled: _reconstruction.cameraInit && _reconstruction.nbCameras + } - // Image count - RowLayout { - Layout.fillWidth: true - spacing: 8 - RowLayout { - MaterialLabel { text: MaterialIcons.image } - Label { text: grid.model.count } + Item { Layout.fillHeight: true; Layout.fillWidth: true } + + MaterialToolLabelButton { + id: displayHDR + property var activeNode: _reconstruction.activeNodes.get("LdrToHdrMerge").node + ToolTip.text: "Visualize HDR images: " + (activeNode ? 
activeNode.label : "No Node") + iconText: MaterialIcons.filter + label: activeNode ? activeNode.attribute("nbBrackets").value : "" + // visible: activeNode + enabled: activeNode && activeNode.isComputed + property string nodeID: activeNode ? (activeNode.label + activeNode.isComputed) : "" + onNodeIDChanged: { + if(checked) { + open(); + } } - RowLayout { - visible: _reconstruction.cameraInit && _reconstruction.nbCameras - MaterialLabel { text: MaterialIcons.videocam } - Label { text: _reconstruction.cameraInit ? _reconstruction.nbCameras : 0 } + onEnabledChanged: { + // Reset the toggle to avoid getting stuck + // with the HDR node checked but disabled. + if(checked) { + checked = false; + close(); + } + } + checkable: true + checked: false + onClicked: { + if(checked) { + open(); + } else { + close(); + } + } + function open() { + if(imageProcessing.checked) + imageProcessing.checked = false; + _reconstruction.setupTempCameraInit(activeNode, "outSfMData"); + } + function close() { + _reconstruction.clearTempCameraInit(); } } MaterialToolButton { - id: displayHDR - font.pointSize: 20 + id: imageProcessing + property var activeNode: _reconstruction.activeNodes.get("ImageProcessing").node + font.pointSize: 15 padding: 0 - anchors.margins: 0 - implicitHeight: 14 - ToolTip.text: "Visualize HDR images" - text: MaterialIcons.hdr_on - visible: _reconstruction.ldr2hdr - enabled: visible && _reconstruction.ldr2hdr.isComputed() + ToolTip.text: "Preprocessed Images: " + (activeNode ? activeNode.label : "No Node") + text: MaterialIcons.wallpaper + visible: activeNode && activeNode.attribute("outSfMData").value + enabled: activeNode && activeNode.isComputed + property string nodeID: activeNode ? (activeNode.label + activeNode.isComputed) : "" + onNodeIDChanged: { + if(checked) { + open(); + } + } onEnabledChanged: { // Reset the toggle to avoid getting stuck // with the HDR node checked but disabled. 
- checked = false; + if(checked) { + checked = false; + close(); + } } checkable: true checked: false - onClicked: { _reconstruction.setupLDRToHDRCameraInit(); } + onClicked: { + if(checked) { + open(); + } else { + close(); + } + } + function open() { + if(displayHDR.checked) + displayHDR.checked = false; + _reconstruction.setupTempCameraInit(activeNode, "outSfMData"); + } + function close() { + _reconstruction.clearTempCameraInit(); + } } - Item { Layout.fillHeight: true; Layout.fillWidth: true } + Item { Layout.fillHeight: true; width: 1 } // Thumbnail size icon and slider MaterialToolButton { text: MaterialIcons.photo_size_select_large + ToolTip.text: "Thumbnails Scale" padding: 0 anchors.margins: 0 font.pointSize: 11 @@ -385,5 +464,4 @@ Panel { implicitWidth: 70 } } - } diff --git a/meshroom/ui/qml/MaterialIcons/MLabel.qml b/meshroom/ui/qml/MaterialIcons/MLabel.qml new file mode 100644 index 0000000000..2251a6b6ed --- /dev/null +++ b/meshroom/ui/qml/MaterialIcons/MLabel.qml @@ -0,0 +1,23 @@ +import QtQuick 2.9 +import QtQuick.Controls 2.4 + + +/** + * MLabel is a standard Label. + * If ToolTip.text is set, it shows up a tooltip when hovered. + */ +Label { + padding: 4 + MouseArea { + id: mouseArea + anchors.fill: parent + hoverEnabled: true + acceptedButtons: Qt.NoButton + } + ToolTip.visible: mouseArea.containsMouse + ToolTip.delay: 500 + background: Rectangle { + anchors.fill: parent + color: mouseArea.containsMouse ? Qt.darker(parent.palette.base, 0.6) : "transparent" + } +} diff --git a/meshroom/ui/qml/MaterialIcons/MaterialToolButton.qml b/meshroom/ui/qml/MaterialIcons/MaterialToolButton.qml index 2eea541122..b24e2ad457 100644 --- a/meshroom/ui/qml/MaterialIcons/MaterialToolButton.qml +++ b/meshroom/ui/qml/MaterialIcons/MaterialToolButton.qml @@ -1,5 +1,6 @@ import QtQuick 2.9 import QtQuick.Controls 2.3 +import QtQuick.Layouts 1.3 /** @@ -7,6 +8,7 @@ import QtQuick.Controls 2.3 * It also shows up its tooltip when hovered. 
*/ ToolButton { + id: control font.family: MaterialIcons.fontFamily padding: 4 font.pointSize: 13 diff --git a/meshroom/ui/qml/MaterialIcons/MaterialToolLabel.qml b/meshroom/ui/qml/MaterialIcons/MaterialToolLabel.qml new file mode 100644 index 0000000000..b32df53d2e --- /dev/null +++ b/meshroom/ui/qml/MaterialIcons/MaterialToolLabel.qml @@ -0,0 +1,45 @@ +import QtQuick 2.9 +import QtQuick.Controls 2.3 +import QtQuick.Layouts 1.3 + + +/** + * MaterialToolLabel is a Label with an icon (using MaterialIcons). + * It shows up its tooltip when hovered. + */ +Item { + id: control + property alias iconText: icon.text + property alias iconSize: icon.font.pointSize + property alias label: labelItem.text + width: childrenRect.width + height: childrenRect.height + + RowLayout { + Label { + id: icon + font.family: MaterialIcons.fontFamily + font.pointSize: 13 + padding: 0 + text: "" + color: palette.text + } + Label { + id: labelItem + text: "" + color: palette.text + } + Item { + width: 5 + } + } + + MouseArea { + id: mouseArea + anchors.fill: parent + hoverEnabled: true + acceptedButtons: Qt.NoButton + } + ToolTip.visible: mouseArea.containsMouse + ToolTip.delay: 500 +} diff --git a/meshroom/ui/qml/MaterialIcons/MaterialToolLabelButton.qml b/meshroom/ui/qml/MaterialIcons/MaterialToolLabelButton.qml new file mode 100644 index 0000000000..6613dd5133 --- /dev/null +++ b/meshroom/ui/qml/MaterialIcons/MaterialToolLabelButton.qml @@ -0,0 +1,51 @@ +import QtQuick 2.9 +import QtQuick.Controls 2.3 +import QtQuick.Layouts 1.3 + + +/** + * MaterialToolButton is a standard ToolButton using MaterialIcons font. + * It also shows up its tooltip when hovered. 
+ */ +ToolButton { + id: control + property alias iconText: icon.text + property alias iconSize: icon.font.pointSize + property alias label: labelItem.text + padding: 0 + ToolTip.visible: ToolTip.text && hovered + ToolTip.delay: 100 + width: childrenRect.width + height: childrenRect.height + contentItem: RowLayout { + Layout.margins: 0 + Label { + id: icon + font.family: MaterialIcons.fontFamily + font.pointSize: 13 + padding: 0 + text: "" + color: (checked ? palette.highlight : palette.text) + } + Label { + id: labelItem + text: "" + padding: 0 + color: (checked ? palette.highlight : palette.text) + } + } + background: Rectangle { + color: { + if(pressed || checked || hovered) + { + if(pressed || checked) + return Qt.darker(parent.palette.base, 1.3) + if(hovered) + return Qt.darker(parent.palette.base, 0.6) + } + return "transparent"; + } + + border.color: checked ? Qt.darker(parent.palette.base, 1.4) : "transparent" + } +} diff --git a/meshroom/ui/qml/MaterialIcons/qmldir b/meshroom/ui/qml/MaterialIcons/qmldir index c3d64e4b28..4160608111 100644 --- a/meshroom/ui/qml/MaterialIcons/qmldir +++ b/meshroom/ui/qml/MaterialIcons/qmldir @@ -1,4 +1,7 @@ module MaterialIcons singleton MaterialIcons 2.2 MaterialIcons.qml MaterialToolButton 2.2 MaterialToolButton.qml +MaterialToolLabelButton 2.2 MaterialToolLabelButton.qml +MaterialToolLabel 2.2 MaterialToolLabel.qml MaterialLabel 2.2 MaterialLabel.qml +MLabel 2.2 MLabel.qml diff --git a/meshroom/ui/qml/Viewer/CircleGizmo.qml b/meshroom/ui/qml/Viewer/CircleGizmo.qml new file mode 100644 index 0000000000..0b7d9e9814 --- /dev/null +++ b/meshroom/ui/qml/Viewer/CircleGizmo.qml @@ -0,0 +1,100 @@ +import QtQuick 2.11 + +Rectangle { + id: root + + property bool readOnly: false + + signal moved() + signal incrementRadius(real radiusOffset) + + width: radius * 2 + height: width + color: "transparent" + border.width: 5 + border.color: readOnly ? 
"green" : "yellow" + + /* + // visualize top-left corner for debugging purpose + Rectangle { + color: "red" + width: 500 + height: 50 + } + Rectangle { + color: "red" + width: 50 + height: 500 + } + */ + // Cross to visualize the circle center + Rectangle { + color: parent.border.color + anchors.centerIn: parent + width: parent.width * 0.2 + height: parent.border.width * 0.5 + } + Rectangle { + color: parent.border.color + anchors.centerIn: parent + width: parent.border.width * 0.5 + height: parent.height * 0.2 + } + + Behavior on x { + NumberAnimation { + duration: 100 + } + } + + Behavior on y { + NumberAnimation { + duration: 100 + } + } + + Behavior on radius { + NumberAnimation { + duration: 100 + } + } + + Loader { + anchors.fill: parent + active: !root.readOnly + + sourceComponent: MouseArea { + id: mArea + anchors.fill: parent + cursorShape: root.readOnly ? Qt.ArrowCursor : (controlModifierEnabled ? Qt.SizeBDiagCursor : (pressed ? Qt.ClosedHandCursor : Qt.OpenHandCursor)) + propagateComposedEvents: true + + property bool controlModifierEnabled: false + onPositionChanged: { + mArea.controlModifierEnabled = (mouse.modifiers & Qt.ControlModifier) + mouse.accepted = false; + } + acceptedButtons: Qt.LeftButton + hoverEnabled: true + drag.target: root + + drag.onActiveChanged: { + if(!drag.active) { + moved(); + } + } + onPressed: { + forceActiveFocus(); + } + onWheel: { + mArea.controlModifierEnabled = (wheel.modifiers & Qt.ControlModifier) + if (wheel.modifiers & Qt.ControlModifier) { + incrementRadius(wheel.angleDelta.y / 120.0); + wheel.accepted = true; + } else { + wheel.accepted = false; + } + } + } + } +} diff --git a/meshroom/ui/qml/Viewer/FeaturesInfoOverlay.qml b/meshroom/ui/qml/Viewer/FeaturesInfoOverlay.qml index 292982db1e..deef580d17 100644 --- a/meshroom/ui/qml/Viewer/FeaturesInfoOverlay.qml +++ b/meshroom/ui/qml/Viewer/FeaturesInfoOverlay.qml @@ -76,7 +76,7 @@ FloatingPane { spacing: 4 - // Features visibility toogle + // Features visibility 
toggle MaterialToolButton { id: featuresVisibilityButton checkable: true diff --git a/meshroom/ui/qml/Viewer/FeaturesViewer.qml b/meshroom/ui/qml/Viewer/FeaturesViewer.qml index 83b796cc52..3730747384 100644 --- a/meshroom/ui/qml/Viewer/FeaturesViewer.qml +++ b/meshroom/ui/qml/Viewer/FeaturesViewer.qml @@ -10,13 +10,13 @@ import Utils 1.0 Repeater { id: root - /// ViewID to display the features of + /// ViewID to display the features of a specific view property int viewId /// SfMData to display the data of SfM property var sfmData /// Folder containing the features files property string featureFolder - /// Folder containing the matches files + /// Tracks object loading all the matches files property var tracks /// The list of describer types to load property alias describerTypes: root.model diff --git a/meshroom/ui/qml/Viewer/FloatImage.qml b/meshroom/ui/qml/Viewer/FloatImage.qml index 36bc77cb9f..24809c4da4 100644 --- a/meshroom/ui/qml/Viewer/FloatImage.qml +++ b/meshroom/ui/qml/Viewer/FloatImage.qml @@ -50,5 +50,7 @@ AliceVision.FloatImageViewer { id: mouseArea anchors.fill: parent hoverEnabled: true + // Do not intercept mouse events, only get the mouse over information + acceptedButtons: Qt.NoButton } } diff --git a/meshroom/ui/qml/Viewer/HdrImageToolbar.qml b/meshroom/ui/qml/Viewer/HdrImageToolbar.qml index 88f11317b8..53a6cb956a 100644 --- a/meshroom/ui/qml/Viewer/HdrImageToolbar.qml +++ b/meshroom/ui/qml/Viewer/HdrImageToolbar.qml @@ -118,7 +118,7 @@ FloatingPane { id: gammaCtrl Layout.fillWidth: true from: 0.01 - to: 4 + to: 16 value: 1 stepSize: 0.01 } diff --git a/meshroom/ui/qml/Viewer/SfmGlobalStats.qml b/meshroom/ui/qml/Viewer/SfmGlobalStats.qml index 81f8aea843..e97d8c96e2 100644 --- a/meshroom/ui/qml/Viewer/SfmGlobalStats.qml +++ b/meshroom/ui/qml/Viewer/SfmGlobalStats.qml @@ -20,7 +20,7 @@ FloatingPane { property var mTracks property color textColor: Colors.sysPalette.text - visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed()) ? 
root.visible : false + visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed) ? root.visible : false clip: true padding: 4 diff --git a/meshroom/ui/qml/Viewer/SfmStatsView.qml b/meshroom/ui/qml/Viewer/SfmStatsView.qml index 974078e3b0..d4ba8b2f3c 100644 --- a/meshroom/ui/qml/Viewer/SfmStatsView.qml +++ b/meshroom/ui/qml/Viewer/SfmStatsView.qml @@ -21,7 +21,7 @@ FloatingPane { property int viewId property color textColor: Colors.sysPalette.text - visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed()) ? root.visible : false + visible: (_reconstruction.sfm && _reconstruction.sfm.isComputed) ? root.visible : false clip: true padding: 4 diff --git a/meshroom/ui/qml/Viewer/Viewer2D.qml b/meshroom/ui/qml/Viewer/Viewer2D.qml index 1759d82026..03b80089dc 100644 --- a/meshroom/ui/qml/Viewer/Viewer2D.qml +++ b/meshroom/ui/qml/Viewer/Viewer2D.qml @@ -18,7 +18,9 @@ FocusScope { property alias useFloatImageViewer: displayHDR.checked property string loadingModules: { - var res = "" + if(!imgContainer.image) + return ""; + var res = ""; if(imgContainer.image.status === Image.Loading) res += " Image"; if(featuresViewerLoader.status === Loader.Ready) @@ -102,10 +104,11 @@ FocusScope { } function getImageFile(type) { + var depthMapNode = _reconstruction.activeNodes.get('allDepthMap').node; if (type == "image") { return root.source; - } else if (_reconstruction.depthMap != undefined && _reconstruction.selectedViewId >= 0) { - return Filepath.stringToUrl(_reconstruction.depthMap.internalFolder+_reconstruction.selectedViewId+"_"+type+"Map.exr"); + } else if (depthMapNode != undefined && _reconstruction.selectedViewId >= 0) { + return Filepath.stringToUrl(depthMapNode.internalFolder+_reconstruction.selectedViewId+"_"+type+"Map.exr"); } return ""; } @@ -180,17 +183,21 @@ FocusScope { visible: (floatImageViewerLoader.status === Loader.Ready) anchors.centerIn: parent - Component.onCompleted: { - // instantiate and initialize a FeaturesViewer component dynamically using 
Loader.setSource - // Note: It does not work to use previously created component, - // so we re-create it with setSource. - // floatViewerComp.createObject(floatImageViewerLoader, { - setSource("FloatImage.qml", { - 'source': Qt.binding(function() { return getImageFile(imageType.type); }), - 'gamma': Qt.binding(function() { return hdrImageToolbar.gammaValue; }), - 'offset': Qt.binding(function() { return hdrImageToolbar.offsetValue; }), - 'channelModeString': Qt.binding(function() { return hdrImageToolbar.channelModeValue; }), - }) + onActiveChanged: { + if(active) { + // instantiate and initialize a FeaturesViewer component dynamically using Loader.setSource + // Note: It does not work to use previously created component, so we re-create it with setSource. + // floatViewerComp.createObject(floatImageViewerLoader, { + setSource("FloatImage.qml", { + 'source': Qt.binding(function() { return getImageFile(imageType.type); }), + 'gamma': Qt.binding(function() { return hdrImageToolbar.gammaValue; }), + 'offset': Qt.binding(function() { return hdrImageToolbar.offsetValue; }), + 'channelModeString': Qt.binding(function() { return hdrImageToolbar.channelModeValue; }), + }) + } else { + // Force the unload (instead of using Component.onCompleted to load it once and for all) is necessary since Qt 5.14 + setSource("", {}) + } } } @@ -236,11 +243,11 @@ FocusScope { scale: 1.0 // FeatureViewer: display view extracted feature points - // note: requires QtAliceVision plugin - use a Loader to evaluate plugin avaibility at runtime + // note: requires QtAliceVision plugin - use a Loader to evaluate plugin availability at runtime Loader { id: featuresViewerLoader - active: displayFeatures.checked + property var activeNode: _reconstruction.activeNodes.get("FeatureExtraction").node // handle rotation/position based on available metadata rotation: { @@ -259,8 +266,8 @@ FocusScope { // instantiate and initialize a FeaturesViewer component dynamically using Loader.setSource 
setSource("FeaturesViewer.qml", { 'viewId': Qt.binding(function() { return _reconstruction.selectedViewId; }), - 'model': Qt.binding(function() { return _reconstruction.featureExtraction ? _reconstruction.featureExtraction.attribute("describerTypes").value : ""; }), - 'featureFolder': Qt.binding(function() { return _reconstruction.featureExtraction ? Filepath.stringToUrl(_reconstruction.featureExtraction.attribute("output").value) : ""; }), + 'model': Qt.binding(function() { return activeNode ? activeNode.attribute("describerTypes").value : ""; }), + 'featureFolder': Qt.binding(function() { return activeNode ? Filepath.stringToUrl(activeNode.attribute("output").value) : ""; }), 'tracks': Qt.binding(function() { return mtracksLoader.status === Loader.Ready ? mtracksLoader.item : null; }), 'sfmData': Qt.binding(function() { return msfmDataLoader.status === Loader.Ready ? msfmDataLoader.item : null; }), }) @@ -270,6 +277,51 @@ FocusScope { } } } + + // FisheyeCircleViewer: display fisheye circle + // note: use a Loader to evaluate if a PanoramaInit node exist and displayFisheyeCircle checked at runtime + Loader { + anchors.centerIn: parent + property var activeNode: _reconstruction.activeNodes.get("PanoramaInit").node + active: (displayFisheyeCircleLoader.checked && activeNode) + + // handle rotation/position based on available metadata + rotation: { + var orientation = metadata ? metadata["Orientation"] : 0 + switch(orientation) { + case "6": return 90; + case "8": return -90; + default: return 0; + } + } + + sourceComponent: CircleGizmo { + property bool useAuto: activeNode.attribute("estimateFisheyeCircle").value + readOnly: useAuto + visible: (!useAuto) || activeNode.isComputed + property real userFisheyeRadius: activeNode.attribute("fisheyeRadius").value + property variant fisheyeAutoParams: _reconstruction.getAutoFisheyeCircle(activeNode) + + x: useAuto ? fisheyeAutoParams.x : activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_x").value + y: useAuto ? 
fisheyeAutoParams.y : activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_y").value + radius: useAuto ? fisheyeAutoParams.z : ((imgContainer.image ? Math.min(imgContainer.image.width, imgContainer.image.height) : 1.0) * 0.5 * (userFisheyeRadius * 0.01)) + + border.width: Math.max(1, (3.0 / imgContainer.scale)) + onMoved: { + if(!useAuto) + { + _reconstruction.setAttribute(activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_x"), x) + _reconstruction.setAttribute(activeNode.attribute("fisheyeCenterOffset.fisheyeCenterOffset_y"), y) + } + } + onIncrementRadius: { + if(!useAuto) + { + _reconstruction.setAttribute(activeNode.attribute("fisheyeRadius"), activeNode.attribute("fisheyeRadius").value + radiusOffset) + } + } + } + } } ColumnLayout { @@ -302,8 +354,9 @@ FocusScope { // show which depthmap node is active Label { id: depthMapNodeName - visible: (_reconstruction.depthMap != undefined) && (imageType.type != "image") - text: (_reconstruction.depthMap != undefined ? _reconstruction.depthMap.label : "") + property var activeNode: _reconstruction.activeNodes.get("allDepthMap").node + visible: (activeNode != undefined) && (imageType.type != "image") + text: (activeNode != undefined ? activeNode.label : "") font.pointSize: 8 horizontalAlignment: TextInput.AlignLeft @@ -334,11 +387,11 @@ FocusScope { Loader { id: msfmDataLoader - // active: _reconstruction.sfm && _reconstruction.sfm.isComputed() + // active: _reconstruction.sfm && _reconstruction.sfm.isComputed property bool isUsed: displayFeatures.checked || displaySfmStatsView.checked || displaySfmDataGlobalStats.checked property var activeNode: _reconstruction.sfm - property bool isComputed: activeNode && activeNode.isComputed() + property bool isComputed: activeNode && activeNode.isComputed active: false // It takes time to load tracks, so keep them looaded, if we may use it again. 
@@ -372,11 +425,10 @@ FocusScope { } Loader { id: mtracksLoader - // active: _reconstruction.featureMatching property bool isUsed: displayFeatures.checked || displaySfmStatsView.checked || displaySfmDataGlobalStats.checked - property var activeNode: _reconstruction.featureMatching - property bool isComputed: activeNode && activeNode.isComputed() + property var activeNode: _reconstruction.activeNodes.get('FeatureMatching').node + property bool isComputed: activeNode && activeNode.isComputed active: false // It takes time to load tracks, so keep them looaded, if we may use it again. @@ -445,10 +497,10 @@ FocusScope { left: parent.left margins: 2 } - active: displayFeatures.checked + active: displayFeatures.checked && featuresViewerLoader.status === Loader.Ready sourceComponent: FeaturesInfoOverlay { - featureExtractionNode: _reconstruction.featureExtraction + featureExtractionNode: _reconstruction.activeNodes.get('FeatureExtraction').node pluginStatus: featuresViewerLoader.status featuresViewer: featuresViewerLoader.item } @@ -464,9 +516,8 @@ FocusScope { anchors.fill: parent // zoom label - Label { + MLabel { text: ((imgContainer.image && (imgContainer.image.status === Image.Ready)) ? imgContainer.scale.toFixed(2) : "1.00") + "x" - state: "xsmall" MouseArea { anchors.fill: parent acceptedButtons: Qt.LeftButton | Qt.RightButton @@ -483,6 +534,7 @@ FocusScope { } } } + ToolTip.text: "Zoom" } MaterialToolButton { id: displayAlphaBackground @@ -514,21 +566,32 @@ FocusScope { checkable: true checked: false } + MaterialToolButton { + id: displayFisheyeCircleLoader + property var activeNode: _reconstruction.activeNodes.get('PanoramaInit').node + ToolTip.text: "Display Fisheye Circle: " + (activeNode ? 
activeNode.label : "No Node") + text: MaterialIcons.vignette + // text: MaterialIcons.panorama_fish_eye + font.pointSize: 11 + Layout.minimumWidth: 0 + checkable: true + checked: false + enabled: activeNode && activeNode.attribute("useFisheye").value + visible: activeNode + } + Label { id: resolutionLabel Layout.fillWidth: true - text: imgContainer.image ? (imgContainer.image.sourceSize.width + "x" + imgContainer.image.sourceSize.height) : "" + text: (imgContainer.image && imgContainer.image.sourceSize.width > 0) ? (imgContainer.image.sourceSize.width + "x" + imgContainer.image.sourceSize.height) : "" elide: Text.ElideRight horizontalAlignment: Text.AlignHCenter - /*Rectangle { - anchors.fill: parent - color: "blue" - }*/ } ComboBox { id: imageType + property var activeNode: _reconstruction.activeNodes.get('allDepthMap').node // set min size to 5 characters + one margin for the combobox clip: true Layout.minimumWidth: 0 @@ -539,12 +602,13 @@ FocusScope { property string type: enabled ? types[currentIndex] : types[0] model: types - enabled: _reconstruction.depthMap != undefined + enabled: activeNode } MaterialToolButton { - enabled: _reconstruction.depthMap != undefined - ToolTip.text: "View Depth Map in 3D (" + (_reconstruction.depthMap != undefined ? _reconstruction.depthMap.label : "No DepthMap Node Selected") + ")" + property var activeNode: _reconstruction.activeNodes.get('allDepthMap').node + enabled: activeNode + ToolTip.text: "View Depth Map in 3D (" + (activeNode ? 
activeNode.label : "No DepthMap Node Selected") + ")" text: MaterialIcons.input font.pointSize: 11 Layout.minimumWidth: 0 @@ -556,6 +620,7 @@ FocusScope { MaterialToolButton { id: displaySfmStatsView + property var activeNode: _reconstruction.activeNodes.get('sfm').node font.family: MaterialIcons.fontFamily text: MaterialIcons.assessment @@ -568,10 +633,9 @@ FocusScope { smooth: false flat: true checkable: enabled - enabled: _reconstruction.sfm && _reconstruction.sfm.isComputed() && _reconstruction.selectedViewId >= 0 + enabled: activeNode && activeNode.isComputed && _reconstruction.selectedViewId >= 0 onCheckedChanged: { - if(checked == true) - { + if(checked == true) { displaySfmDataGlobalStats.checked = false metadataCB.checked = false } @@ -580,6 +644,7 @@ FocusScope { MaterialToolButton { id: displaySfmDataGlobalStats + property var activeNode: _reconstruction.activeNodes.get('sfm').node font.family: MaterialIcons.fontFamily text: MaterialIcons.language @@ -592,10 +657,9 @@ FocusScope { smooth: false flat: true checkable: enabled - enabled: _reconstruction.sfm && _reconstruction.sfm.isComputed() + enabled: activeNode && activeNode.isComputed onCheckedChanged: { - if(checked == true) - { + if(checked == true) { displaySfmStatsView.checked = false metadataCB.checked = false } diff --git a/meshroom/ui/qml/Viewer3D/ImageOverlay.qml b/meshroom/ui/qml/Viewer3D/ImageOverlay.qml index a6ddf90ac5..5f79d4df3c 100644 --- a/meshroom/ui/qml/Viewer3D/ImageOverlay.qml +++ b/meshroom/ui/qml/Viewer3D/ImageOverlay.qml @@ -4,7 +4,7 @@ import QtQuick.Layouts 1.12 /** * ImageOverlay enables to display a Viewpoint image on top of a 3D View. * It takes the principal point correction into account and handle image ratio to - * correclty fit or crop according to original image ratio and parent Item ratio. + * correctly fit or crop according to original image ratio and parent Item ratio. 
*/ Item { id: root diff --git a/meshroom/ui/qml/WorkspaceView.qml b/meshroom/ui/qml/WorkspaceView.qml index 3a5e80fb18..4caafc0204 100644 --- a/meshroom/ui/qml/WorkspaceView.qml +++ b/meshroom/ui/qml/WorkspaceView.qml @@ -65,7 +65,7 @@ Item { readOnly: root.readOnly cameraInits: root.cameraInits cameraInit: reconstruction.cameraInit - hdrCameraInit: reconstruction.hdrCameraInit + tempCameraInit: reconstruction.tempCameraInit currentIndex: reconstruction.cameraInitIndex onRemoveImageRequest: reconstruction.removeAttribute(attribute) onFilesDropped: reconstruction.handleFilesDrop(drop, augmentSfm ? null : cameraInit) @@ -191,7 +191,7 @@ Item { mediaLibrary: viewer3D.library camera: viewer3D.mainCamera uigraph: reconstruction - onNodeActivated: _reconstruction.setActiveNodeOfType(node) + onNodeActivated: _reconstruction.setActiveNode(node) } } } diff --git a/meshroom/ui/qml/main.qml b/meshroom/ui/qml/main.qml index 75b8cbeb4d..c4249fbd73 100755 --- a/meshroom/ui/qml/main.qml +++ b/meshroom/ui/qml/main.qml @@ -4,7 +4,10 @@ import QtQuick.Controls 1.4 as Controls1 // For SplitView import QtQuick.Layouts 1.1 import QtQuick.Window 2.3 import QtQml.Models 2.2 + import Qt.labs.platform 1.0 as Platform +import QtQuick.Dialogs 1.3 + import Qt.labs.settings 1.0 import GraphEditor 1.0 import MaterialIcons 2.2 @@ -202,13 +205,27 @@ ApplicationWindow { } } - Platform.FileDialog { + FileDialog { id: openFileDialog title: "Open File" nameFilters: ["Meshroom Graphs (*.mg)"] onAccepted: { - _reconstruction.loadUrl(file.toString()) - MeshroomApp.addRecentProjectFile(file.toString()) + if(_reconstruction.loadUrl(fileUrl)) + { + MeshroomApp.addRecentProjectFile(fileUrl.toString()) + } + } + } + + FileDialog { + id: importFilesDialog + title: "Import Images" + selectExisting: true + selectMultiple: true + nameFilters: [] + onAccepted: { + console.warn("importFilesDialog fileUrls: " + importFilesDialog.fileUrls) + _reconstruction.importImagesUrls(importFilesDialog.fileUrls) } } @@ -326,6 
+343,10 @@ ApplicationWindow { text: "HDRI" onTriggered: ensureSaved(function() { _reconstruction.new("hdri") }) } + Action { + text: "HDRI Fisheye" + onTriggered: ensureSaved(function() { _reconstruction.new("hdriFisheye") }) + } } Action { id: openActionItem @@ -353,8 +374,14 @@ ApplicationWindow { MenuItem { onTriggered: ensureSaved(function() { openRecentMenu.dismiss(); - _reconstruction.load(modelData); - MeshroomApp.addRecentProjectFile(modelData); + if(_reconstruction.loadUrl(modelData)) + { + MeshroomApp.addRecentProjectFile(modelData); + } + else + { + MeshroomApp.removeRecentProjectFile(modelData); + } }) text: fileTextMetrics.elidedText @@ -367,6 +394,12 @@ ApplicationWindow { } } } + Action { + id: importActionItem + text: "Import Images" + shortcut: "Ctrl+I" + onTriggered: importFilesDialog.open() + } Action { id: saveAction text: "Save" @@ -670,7 +703,6 @@ ApplicationWindow { } } - GraphEditor { id: graphEditor @@ -680,13 +712,13 @@ ApplicationWindow { readOnly: graphLocked onNodeDoubleClicked: { - _reconstruction.setActiveNodeOfType(node); + _reconstruction.setActiveNode(node); let viewable = false; for(var i=0; i < node.attributes.count; ++i) { var attr = node.attributes.at(i) - if(attr.isOutput && workspaceView.viewAttribute(attr)) + if(attr.isOutput && workspaceView.viewAttribute(attr, mouse)) break; } } diff --git a/meshroom/ui/reconstruction.py b/meshroom/ui/reconstruction.py index 80fa3b9fbe..7c8ca6a3ea 100755 --- a/meshroom/ui/reconstruction.py +++ b/meshroom/ui/reconstruction.py @@ -3,18 +3,26 @@ import math import os from threading import Thread +from collections import Iterable from PySide2.QtCore import QObject, Slot, Property, Signal, QUrl, QSizeF from PySide2.QtGui import QMatrix4x4, QMatrix3x3, QQuaternion, QVector3D, QVector2D import meshroom.core +import meshroom.common from meshroom import multiview from meshroom.common.qt import QObjectListModel from meshroom.core import Version -from meshroom.core.node import Node, Status, Position 
+from meshroom.core.node import Node, CompatibilityNode, Status, Position from meshroom.ui.graph import UIGraph from meshroom.ui.utils import makeProperty +# Python2 compatibility +try: + FileNotFoundError +except NameError: + FileNotFoundError = IOError + class Message(QObject): """ Simple structure wrapping a high-level message. """ @@ -190,6 +198,7 @@ def __init__(self, viewpointAttribute, reconstruction): self._reconstructed = False # PrepareDenseScene self._undistortedImagePath = '' + self._activeNode_PrepareDenseScene = self._reconstruction.activeNodes.get("PrepareDenseScene") # update internally cached variables self._updateInitialParams() @@ -199,7 +208,7 @@ def __init__(self, viewpointAttribute, reconstruction): # trigger internal members updates when reconstruction members changes self._reconstruction.cameraInitChanged.connect(self._updateInitialParams) self._reconstruction.sfmReportChanged.connect(self._updateSfMParams) - self._reconstruction.prepareDenseSceneChanged.connect(self._updateDenseSceneParams) + self._activeNode_PrepareDenseScene.nodeChanged.connect(self._updateDenseSceneParams) def _updateInitialParams(self): """ Update internal members depending on CameraInit. """ @@ -229,11 +238,11 @@ def _updateSfMParams(self): def _updateDenseSceneParams(self): """ Update internal members depending on PrepareDenseScene. 
""" # undistorted image path - if not self._reconstruction.prepareDenseScene: + if not self._activeNode_PrepareDenseScene.node: self._undistortedImagePath = '' else: - filename = "{}.{}".format(self._viewpoint.viewId.value, self._reconstruction.prepareDenseScene.outputFileType.value) - self._undistortedImagePath = os.path.join(self._reconstruction.prepareDenseScene.output.value, filename) + filename = "{}.{}".format(self._viewpoint.viewId.value, self._activeNode_PrepareDenseScene.node.outputFileType.value) + self._undistortedImagePath = os.path.join(self._activeNode_PrepareDenseScene.node.output.value, filename) self.denseSceneParamsChanged.emit() @Property(type=QObject, constant=True) @@ -356,33 +365,75 @@ def undistortedImageSource(self): return QUrl.fromLocalFile(self._undistortedImagePath) +def parseSfMJsonFile(sfmJsonFile): + """ + Parse the SfM Json file and return views, poses and intrinsics as three dicts with viewId, poseId and intrinsicId as keys. + """ + if not os.path.exists(sfmJsonFile): + return {}, {}, {} + + with open(sfmJsonFile) as jsonFile: + report = json.load(jsonFile) + + views = dict() + poses = dict() + intrinsics = dict() + + for view in report['views']: + views[view['viewId']] = view + + for pose in report['poses']: + poses[pose['poseId']] = pose['pose'] + + for intrinsic in report['intrinsics']: + intrinsics[intrinsic['intrinsicId']] = intrinsic + + return views, poses, intrinsics + + +class ActiveNode(QObject): + """ + Hold one active node for a given NodeType. + """ + def __init__(self, nodeType, parent=None): + super(ActiveNode, self).__init__(parent) + self.nodeType = nodeType + self._node = None + + nodeChanged = Signal() + node = makeProperty(QObject, "_node", nodeChanged, resetOnDestroy=True) + + class Reconstruction(UIGraph): """ Specialization of a UIGraph designed to manage a 3D reconstruction. 
""" + activeNodeCategories = { + "sfm": ["StructureFromMotion", "GlobalSfM", "PanoramaEstimation", "SfMTransfer", "SfMTransform", + "SfMAlignment"], + "undistort": ["PrepareDenseScene", "PanoramaWarping"], + "allDepthMap": ["DepthMap", "DepthMapFilter"], + } def __init__(self, defaultPipeline='', parent=None): super(Reconstruction, self).__init__(parent) # initialize member variables for key steps of the 3D reconstruction pipeline + self._activeNodes = meshroom.common.DictModel(keyAttrName="nodeType") + self.initActiveNodes() + # - CameraInit self._cameraInit = None # current CameraInit node self._cameraInits = QObjectListModel(parent=self) # all CameraInit nodes self._buildingIntrinsics = False self.intrinsicsBuilt.connect(self.onIntrinsicsAvailable) - self._hdrCameraInit = None + self.cameraInitChanged.connect(self.onCameraInitChanged) - self.importImagesFailed.connect(self.onImportImagesFailed) + self._tempCameraInit = None - # - Feature Extraction - self._featureExtraction = None - self.cameraInitChanged.connect(self.updateFeatureExtraction) - - # - Feature Matching - self._featureMatching = None - self.cameraInitChanged.connect(self.updateFeatureMatching) + self.importImagesFailed.connect(self.onImportImagesFailed) # - SfM self._sfm = None @@ -393,20 +444,6 @@ def __init__(self, defaultPipeline='', parent=None): self._selectedViewpoint = None self._liveSfmManager = LiveSfmManager(self) - # - Prepare Dense Scene (undistorted images) - self._prepareDenseScene = None - - # - Depth Map - self._depthMap = None - self.cameraInitChanged.connect(self.updateDepthMapNode) - - # - Texturing - self._texturing = None - - # - LDR2HDR - self._ldr2hdr = None - self.cameraInitChanged.connect(self.updateLdr2hdrNode) - # react to internal graph changes to update those variables self.graphChanged.connect(self.onGraphChanged) @@ -415,6 +452,18 @@ def __init__(self, defaultPipeline='', parent=None): def setDefaultPipeline(self, defaultPipeline): self._defaultPipeline = 
defaultPipeline + def initActiveNodes(self): + # Create all possible entries + for category, _ in self.activeNodeCategories.items(): + self._activeNodes.add(ActiveNode(category, self)) + for nodeType, _ in meshroom.core.nodesDesc.items(): + self._activeNodes.add(ActiveNode(nodeType, self)) + + def onCameraInitChanged(self): + # Update active nodes when CameraInit changes + nodes = self._graph.nodesFromNode(self._cameraInit)[0] + self.setActiveNodes(nodes) + @Slot() @Slot(str) def new(self, pipeline=None): @@ -426,14 +475,17 @@ def new(self, pipeline=None): elif p.lower() == "hdri": # default hdri pipeline self.setGraph(multiview.hdri()) + elif p.lower() == "hdrifisheye": + # default hdri pipeline + self.setGraph(multiview.hdriFisheye()) else: # use the user-provided default photogrammetry project file self.load(p, setupProjectFile=False) - @Slot(str) + @Slot(str, result=bool) def load(self, filepath, setupProjectFile=True): try: - super(Reconstruction, self).load(filepath, setupProjectFile) + status = super(Reconstruction, self).loadGraph(filepath, setupProjectFile) # warn about pre-release projects being automatically upgraded if Version(self._graph.fileReleaseVersion).major == "0": self.warning.emit(Message( @@ -442,30 +494,48 @@ def load(self, filepath, setupProjectFile=True): "Data might have been lost in the process.", "Open it with the corresponding version of Meshroom to recover your data." 
)) + return status + except FileNotFoundError as e: + self.error.emit( + Message( + "No Such File", + "Error While Loading '{}': No Such File.".format(os.path.basename(filepath)), + "" + ) + ) + logging.error("Error while loading '{}': No Such File.".format(os.path.basename(filepath))) + return False except Exception as e: import traceback trace = traceback.format_exc() self.error.emit( Message( - "Error while loading {}".format(os.path.basename(filepath)), - "An unexpected error has occurred", + "Error While Loading Project File", + "An unexpected error has occurred while loading file: '{}'".format(os.path.basename(filepath)), trace ) ) logging.error(trace) + return False + + @Slot(QUrl, result=bool) + def loadUrl(self, url): + if isinstance(url, (QUrl)): + # depending how the QUrl has been initialized, + # toLocalFile() may return the local path or an empty string + localFile = url.toLocalFile() + if not localFile: + localFile = url.toString() + else: + localFile = url + return self.load(localFile) def onGraphChanged(self): """ React to the change of the internal graph. """ self._liveSfmManager.reset() self.selectedViewId = "-1" - self.featureExtraction = None - self.featureMatching = None self.sfm = None - self.prepareDenseScene = None - self.depthMap = None - self.texturing = None - self.ldr2hdr = None - self.hdrCameraInit = None + self.tempCameraInit = None self.updateCameraInits() if not self._graph: return @@ -481,6 +551,7 @@ def runAsync(func, args=(), kwargs=None): thread.start() return thread + @Slot(QObject) def getViewpoints(self): """ Return the Viewpoints model. """ # TODO: handle multiple Viewpoints models @@ -506,47 +577,60 @@ def setCameraInitIndex(self, idx): camInit = self._cameraInits[idx] if self._cameraInits else None self.cameraInit = camInit - def updateFeatureExtraction(self): - """ Set the current FeatureExtraction node based on the current CameraInit node. 
""" - self.featureExtraction = self.lastNodeOfType('FeatureExtraction', self.cameraInit) if self.cameraInit else None - - def updateFeatureMatching(self): - """ Set the current FeatureMatching node based on the current CameraInit node. """ - self.featureMatching = self.lastNodeOfType('FeatureMatching', self.cameraInit) if self.cameraInit else None - - def updateDepthMapNode(self): - """ Set the current FeatureExtraction node based on the current CameraInit node. """ - self.depthMap = self.lastNodeOfType('DepthMapFilter', self.cameraInit) if self.cameraInit else None - - def updateLdr2hdrNode(self): - """ Set the current LDR2HDR node based on the current CameraInit node. """ - self.ldr2hdr = self.lastNodeOfType('LDRToHDR', self.cameraInit) if self.cameraInit else None - @Slot() - def setupLDRToHDRCameraInit(self): - if not self.ldr2hdr: - self.hdrCameraInit = Node("CameraInit") + def clearTempCameraInit(self): + self.tempCameraInit = None + + @Slot(QObject, str) + def setupTempCameraInit(self, node, attrName): + if not node or not attrName: + self.tempCameraInit = None return - sfmFile = self.ldr2hdr.attribute("outSfMDataFilename").value + sfmFile = node.attribute(attrName).value if not sfmFile or not os.path.isfile(sfmFile): - self.hdrCameraInit = Node("CameraInit") + self.tempCameraInit = None return nodeDesc = meshroom.core.nodesDesc["CameraInit"]() views, intrinsics = nodeDesc.readSfMData(sfmFile) tmpCameraInit = Node("CameraInit", viewpoints=views, intrinsics=intrinsics) - self.hdrCameraInit = tmpCameraInit + self.tempCameraInit = tmpCameraInit + + @Slot(QObject, result=QVector3D) + def getAutoFisheyeCircle(self, panoramaInit): + if not panoramaInit or not panoramaInit.isComputed: + return QVector3D(0.0, 0.0, 0.0) + if not panoramaInit.attribute("estimateFisheyeCircle").value: + return QVector3D(0.0, 0.0, 0.0) + + sfmFile = panoramaInit.attribute('outSfMData').value + if not os.path.exists(sfmFile): + return QVector3D(0.0, 0.0, 0.0) + import io # use io.open 
for Python2/3 compatibility (allow to specify encoding + errors handling) + # skip decoding errors to avoid potential exceptions due to non utf-8 characters in images metadata + with io.open(sfmFile, 'r', encoding='utf-8', errors='ignore') as f: + data = json.load(f) + + intrinsics = data.get('intrinsics', []) + if len(intrinsics) == 0: + return QVector3D(0.0, 0.0, 0.0) + intrinsic = intrinsics[0] + + res = QVector3D(float(intrinsic.get("fisheyeCircleCenterX", 0.0)) - float(intrinsic.get("width", 0.0)) * 0.5, + float(intrinsic.get("fisheyeCircleCenterY", 0.0)) - float(intrinsic.get("height", 0.0)) * 0.5, + float(intrinsic.get("fisheyeCircleRadius", 0.0))) + return res def lastSfmNode(self): """ Retrieve the last SfM node from the initial CameraInit node. """ - return self.lastNodeOfType("StructureFromMotion", self._cameraInit, Status.SUCCESS) + return self.lastNodeOfType(self.activeNodeCategories['sfm'], self._cameraInit, Status.SUCCESS) - def lastNodeOfType(self, nodeType, startNode, preferredStatus=None): + def lastNodeOfType(self, nodeTypes, startNode, preferredStatus=None): """ Returns the last node of the given type starting from 'startNode'. If 'preferredStatus' is specified, the last node with this status will be considered in priority. 
Args: - nodeType (str): the node type + nodeTypes (str list): the node types startNode (Node): the node to start from preferredStatus (Status): (optional) the node status to prioritize @@ -555,7 +639,7 @@ def lastNodeOfType(self, nodeType, startNode, preferredStatus=None): """ if not startNode: return None - nodes = self._graph.nodesFromNode(startNode, nodeType)[0] + nodes = self._graph.nodesFromNode(startNode, nodeTypes)[0] if not nodes: return None node = nodes[-1] @@ -643,22 +727,22 @@ def handleFilesDrop(self, drop, cameraInit): "", )) else: - panoramaExternalInfoNodes = self.graph.nodesByType('PanoramaExternalInfo') + panoramaInitNodes = self.graph.nodesByType('PanoramaInit') for panoramaInfoFile in filesByType.panoramaInfo: - for panoramaInfoNode in panoramaExternalInfoNodes: - panoramaInfoNode.attribute('config').value = panoramaInfoFile - if panoramaExternalInfoNodes: + for panoramaInitNode in panoramaInitNodes: + panoramaInitNode.attribute('config').value = panoramaInfoFile + if panoramaInitNodes: self.info.emit( Message( "Panorama XML", - "XML file declared on PanoramaExternalInfo node", - "XML file '{}' set on node '{}'".format(','.join(filesByType.panoramaInfo), ','.join([n.getLabel() for n in panoramaExternalInfoNodes])), + "XML file declared on PanoramaInit node", + "XML file '{}' set on node '{}'".format(','.join(filesByType.panoramaInfo), ','.join([n.getLabel() for n in panoramaInitNodes])), )) else: self.error.emit( Message( - "No PanoramaExternalInfo Node", - "No PanoramaExternalInfo Node to set the Panorama file:\n'{}'.".format(','.join(filesByType.panoramaInfo)), + "No PanoramaInit Node", + "No PanoramaInit Node to set the Panorama file:\n'{}'.".format(','.join(filesByType.panoramaInfo)), "", )) @@ -702,10 +786,24 @@ def importImagesFromFolder(self, path, recursive=False): recursive: List files in folders recursively. 
""" + logging.debug("importImagesFromFolder: " + str(path)) filesByType = multiview.findFilesByTypeInFolder(path, recursive) if filesByType.images: self.buildIntrinsics(self.cameraInit, filesByType.images) + @Slot("QVariant") + def importImagesUrls(self, imagePaths, recursive=False): + paths = [] + for imagePath in imagePaths: + if isinstance(imagePath, (QUrl)): + p = imagePath.toLocalFile() + if not p: + p = imagePath.toString() + else: + p = imagePath + paths.append(p) + self.importImagesFromFolder(paths) + def importImagesAsync(self, images, cameraInit): """ Add the given list of images to the Reconstruction. """ # Start the process of updating views and intrinsics @@ -819,10 +917,11 @@ def setBuildingIntrinsics(self, value): self._buildingIntrinsics = value self.buildingIntrinsicsChanged.emit() + activeNodes = makeProperty(QObject, "_activeNodes", resetOnDestroy=True) cameraInitChanged = Signal() cameraInit = makeProperty(QObject, "_cameraInit", cameraInitChanged, resetOnDestroy=True) - hdrCameraInitChanged = Signal() - hdrCameraInit = makeProperty(QObject, "_hdrCameraInit", hdrCameraInitChanged, resetOnDestroy=True) + tempCameraInitChanged = Signal() + tempCameraInit = makeProperty(QObject, "_tempCameraInit", tempCameraInitChanged, resetOnDestroy=True) cameraInitIndex = Property(int, getCameraInitIndex, setCameraInitIndex, notify=cameraInitChanged) viewpoints = Property(QObject, getViewpoints, notify=cameraInitChanged) cameraInits = Property(QObject, lambda self: self._cameraInits, constant=True) @@ -833,31 +932,42 @@ def setBuildingIntrinsics(self, value): liveSfmManager = Property(QObject, lambda self: self._liveSfmManager, constant=True) @Slot(QObject) - def setActiveNodeOfType(self, node): + def setActiveNode(self, node): """ Set node as the active node of its type. 
""" - if node.nodeType == "StructureFromMotion": - self.sfm = node - elif node.nodeType == "FeatureExtraction": - self.featureExtraction = node - elif node.nodeType == "FeatureMatching": - self.featureMatching = node - elif node.nodeType == "CameraInit": - self.cameraInit = node - elif node.nodeType == "PrepareDenseScene": - self.prepareDenseScene = node - elif node.nodeType in ("DepthMap", "DepthMapFilter"): - self.depthMap = node + for category, nodeTypes in self.activeNodeCategories.items(): + if node.nodeType in nodeTypes: + self.activeNodes.get(category).node = node + if category == 'sfm': + self.setSfm(node) + self.activeNodes.get(node.nodeType).node = node + + @Slot(QObject) + def setActiveNodes(self, nodes): + """ Set node as the active node of its type. """ + # Setup the active node per category only once, on the last one + nodesByCategory = {} + for node in nodes: + for category, nodeTypes in self.activeNodeCategories.items(): + if node.nodeType in nodeTypes: + nodesByCategory[category] = node + for category, node in nodesByCategory.items(): + self.activeNodes.get(category).node = node + if category == 'sfm': + self.setSfm(node) + for node in nodes: + if not isinstance(node, CompatibilityNode): + self.activeNodes.get(node.nodeType).node = node def updateSfMResults(self): """ Update internal views, poses and solved intrinsics based on the current SfM node. 
""" - if not self._sfm: + if not self._sfm or ('outputViewsAndPoses' not in self._sfm.getAttributes().keys()): self._views = dict() self._poses = dict() self._solvedIntrinsics = dict() else: - self._views, self._poses, self._solvedIntrinsics = self._sfm.nodeDesc.getResults(self._sfm) + self._views, self._poses, self._solvedIntrinsics = parseSfMJsonFile(self._sfm.outputViewsAndPoses.value) self.sfmReportChanged.emit() def getSfm(self): @@ -895,9 +1005,6 @@ def setSfm(self, node): self._sfm.destroyed.disconnect(self._unsetSfm) self._setSfm(node) - self.texturing = self.lastNodeOfType("Texturing", self._sfm, Status.SUCCESS) - self.prepareDenseScene = self.lastNodeOfType("PrepareDenseScene", self._sfm, Status.SUCCESS) - @Slot(QObject, result=bool) def isInViews(self, viewpoint): if not viewpoint: @@ -957,7 +1064,11 @@ def setSelectedViewpoint(self, viewpointAttribute): def reconstructedCamerasCount(self): """ Get the number of reconstructed cameras in the current context. """ - return len([v for v in self.getViewpoints() if self.isReconstructed(v)]) + viewpoints = self.getViewpoints() + # Check that the object is iterable to avoid error with undefined Qt Property + if not isinstance(viewpoints, Iterable): + return 0 + return len([v for v in viewpoints if self.isReconstructed(v)]) @Slot(QObject, result="QVariant") def getSolvedIntrinsics(self, viewpoint): @@ -1000,29 +1111,11 @@ def getPoseRT(self, viewpoint): sfmChanged = Signal() sfm = Property(QObject, getSfm, setSfm, notify=sfmChanged) - featureExtractionChanged = Signal() - featureExtraction = makeProperty(QObject, "_featureExtraction", featureExtractionChanged, resetOnDestroy=True) - - featureMatchingChanged = Signal() - featureMatching = makeProperty(QObject, "_featureMatching", featureMatchingChanged, resetOnDestroy=True) - sfmReportChanged = Signal() # convenient property for QML binding re-evaluation when sfm report changes sfmReport = Property(bool, lambda self: len(self._poses) > 0, notify=sfmReportChanged) 
sfmAugmented = Signal(Node, Node) - prepareDenseSceneChanged = Signal() - prepareDenseScene = makeProperty(QObject, "_prepareDenseScene", notify=prepareDenseSceneChanged, resetOnDestroy=True) - - depthMapChanged = Signal() - depthMap = makeProperty(QObject, "_depthMap", depthMapChanged, resetOnDestroy=True) - - texturingChanged = Signal() - texturing = makeProperty(QObject, "_texturing", notify=texturingChanged) - - ldr2hdrChanged = Signal() - ldr2hdr = makeProperty(QObject, "_ldr2hdr", notify=ldr2hdrChanged, resetOnDestroy=True) - nbCameras = Property(int, reconstructedCamerasCount, notify=sfmReportChanged) # Signals to propagate high-level messages diff --git a/tests/test_graph.py b/tests/test_graph.py index c7fa6e4da5..b2793d05c8 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -180,7 +180,7 @@ def test_graph_reverse_dfs(): nodes = graph.nodesFromNode(B)[0] assert set(nodes) == {B, D, C, E, F} # Get all nodes of type AppendText from B - nodes = graph.nodesFromNode(B, filterType='AppendText')[0] + nodes = graph.nodesFromNode(B, filterTypes=['AppendText'])[0] assert set(nodes) == {B, D, C, F} # Get all nodes from C (order guaranteed) nodes = graph.nodesFromNode(C)[0]