source: TI01-discovery/branches/ingestAutomation-upgrade/OAIBatch/PostgresRecord.py @ 6133

Subversion URL: http://proj.badc.rl.ac.uk/svn/ndg/TI01-discovery/branches/ingestAutomation-upgrade/OAIBatch/PostgresRecord.py@6133
Revision 6133, 24.1 KB checked in by sdonegan, 10 years ago (diff)

Further debugging w.r.t. non-local file handling

RevLine 
[3800]1#!/usr/bin/env python
2'''
[3967]3Class representing the a document to be ingested into the postgres DB table
[3800]4C Byrom Apr 08
5'''
[4854]6from xml.etree import cElementTree
[5248]7import os, sys, logging, re, pkg_resources
[5167]8import csml.csml2Moles.molesReadWrite as MRW
[4854]9from ndg.common.src.models.ndgObject import ndgObject
[5167]10from ndg.common.src.lib.ndgresources import ndgResources
[4854]11import ndg.common.src.lib.fileutilities as FileUtilities
[3847]12from SpatioTemporalData import SpatioTemporalData
[3869]13import keywordAdder
[3800]14
[5248]15SAXON_JAR_FILE = 'lib/saxon9.jar'
16
[3800]17class PostgresRecord:
18    '''
[3967]19    Class representing the a document to be ingested into the postgres DB table
[3800]20    @param filename: Name of file to use a metadata record
[3967]21    @param ndg_dataprovider
22    @param datacentre_groups
23    @param datacentre_namespace
24    @param discovery_id
25    @param xq
26    @param doctype - type of doc to process
[3800]27    '''
[3912]28    # TODO MDIP transforms do not work very well for lots of files - so currently hiding these
[5252]29    documentTypes = ['MOLES', 'DIF', 'DC', 'ISO19139']#, 'MDIP']
[3967]30   
31    # vocab server - used for finding scope values in the moles files
32    ndg_data_provider_vocab = "http://vocab.ndg.nerc.ac.uk/term/N010"
[3809]33       
[5415]34    #def __init__(self, filename, ndg_dataprovider, datacentre_groups, datacentre_namespace, discovery_id, xq, docType):
[6133]35    def __init__(self, filename, ndg_dataprovider, datacentre_groups, datacentre_namespace, discovery_id,datasetName,datacentreName,datasetLastEditUpdateDate,datasetStartDateNom, datasetEndDateNom, xq, docType,baseDir):
[5415]36                 
[3816]37        logging.info("Setting up Postgres record for file, " + filename)
38        self.filename = filename
[3821]39   
40        # NB, if we're dealing with an NDG data provider, the details are slightly different
41        if ndg_dataprovider:
42            discObj=ndgObject(discovery_id)
43            self._local_id = discObj.localID
44            self._repository_local_id = discObj.repository
45        else:
46            self._local_id = discovery_id
47            self._repository_local_id = datacentre_namespace
48           
[3816]49        self._datacentre_groups = datacentre_groups
[3809]50        self._repository = datacentre_namespace
51        self.discovery_id = discovery_id
52        self._xq = xq
[6133]53        self.base_dir = baseDir
54       
[5252]55        # simplify processing by uppercasing format at initialisation
56        self.docType = docType.upper()   
[5415]57       
[5846]58        #make sure we escape any special characters in this field... SJD 20/10/09       
59        self.dataset_name = self.escapeSpecialCharacters(datasetName)
60        self.datacentre_name = self.escapeSpecialCharacters(datacentreName)
61       
62        self.dataset_lastEdit = datasetLastEditUpdateDate   
[5464]63        self.datasetStartNom = datasetStartDateNom
[5524]64        self.datasetEndNom = datasetEndDateNom
[3816]65
66        self._molesFormat = None    # initialise this, so we can guarantee a value - to avoid using getattr
[3809]67        self._allDocs = []  # array to store all the transformed docs - for easy retrieval by the DAO
68
[3821]69        # get the dir of the file - needed by the xquery to use as the target collection
70        tmp = filename.split('/')
71        self._dir = '/'.join(tmp[0:len(tmp)-1])
[4854]72        self.shortFilename = tmp[-1]
[3821]73       
74        # dir to store a temp copy of the moles file, when produced - for use by other transforms
75        self._molesDir = None
[3967]76        # object to hold the moles file - this will be loaded in when it is created - in order to extract
77        # spatiotemporal data, etc
78        self.dgMeta = None
[3821]79
[3800]80        # firstly load contents of file
[3809]81        self.originalFormat = file(filename).read()
82       
[3860]83        # escape any apostrophes
84        self.originalFormat = self.escapeSpecialCharacters(self.originalFormat)
85
[3800]86        # initialise the various record fields
87        self.db_id = None    # the DB ID of the record, for easy reference when it is created
[3809]88        self.molesFormat = None
89        self.dcFormat = None
90        self.mdipFormat = None
91        self.iso19139Format = None
[3853]92        self.scn = 1    # system change number - keeps track of number of mods to a particular row
[3809]93       
[3862]94        # spatiotemporal data object
95        self.stData = None
[3967]96       
97        # fields to hold author, parameter and scope data
98        self.authors = None
99        self.parameters = None
100        self.scope = None
[3800]101
[3860]102    def escapeSpecialCharacters(self, inputString):
103        '''
104        Adjust the input string to escape any characters that would interfere with string or DB
105        operations
106        @param inputString: string to correct
107        @return: corrected string
108        '''
109        return re.sub(r'\'', '\\\'', inputString)
[3972]110
111
112    def unescapeSpecialCharacters(self, inputString):
113        '''
114        Adjust the input string to remove escaped characters that would interfere with string or DB
115        operations
116        @param inputString: string to correct
117        @return: corrected string
118        '''
119        str = re.sub(r'%20', ' ', inputString)
120        return 
[3816]121   
[3860]122   
[3816]123    def doRecordTransforms(self):
124        '''
125        Run various transforms on the original doc, to populate the record with
126        the other types of doc used elsewhere
127        '''
128        logging.info("Running transforms for all document types")
129        for docType in self.documentTypes:
130            self.getDocumentFormat(docType)
[3821]131           
[3816]132        logging.info("Transforms complete")
[3800]133
[3816]134
    def createMolesFile(self):
        '''
        Check if a moles file exists on the system; if not, assume the moles transform has not
        been ran and then produce this file - to allow for use in the various xqueries.
        Side effects: creates self._molesDir on disk, writes the moles doc there, and
        loads it into self.dgMeta for later spatiotemporal/author/parameter parsing.
        @raise SystemError: if the produced moles file cannot be parsed as XML
        '''
        logging.info("Creating moles file on system - for use with other xquery transforms")
        self._molesDir = self._dir + "/moles/"
        FileUtilities.setUpDir(self._molesDir)
        
        # run the moles transform first if it hasn't been done yet
        if self._molesFormat is None:
            self.doMolesTransform()
            
        FileUtilities.createFile(self._molesDir + self.shortFilename, self._molesFormat)
        logging.info("Moles file created - at %s" %self._molesDir)
        
        # now load this moles file, for use when parsing out spatiotemporal, author and parameters data later on       
        molesFile = self._molesDir + self.shortFilename
        logging.info('Retrieving spatiotemporal info from moles file, %s' %molesFile)
        
        # load in the moles file and put this into an object for direct access to the xml elements
        self.dgMeta=MRW.dgMetadata()
        try:
            self.dgMeta.fromXML(cElementTree.ElementTree(file=molesFile).getroot())
        except Exception, detail:
            raise SystemError, 'Cannot parse the XML moles document %s. Detail:\n%s' %(molesFile, detail)
161
[3821]162
    def doTransform(self, xQueryType):
        '''
        Transform the record according to the specified XQuery type
        @param xQueryType: XQuery doc to use to do the transform
        @return: the metadata record in the required transformed format
        @raise SystemError: if the external saxon XQuery invocation exits non-zero
        '''
        logging.info("Running XQuery transform, " + xQueryType + " to create transformed document")

        # firstly, check if this is a moles -> something else query; if so, ensure there is a valid
        # moles file available for the transform - and use the correct dir for the xquery collection
        dir = self._dir
        if xQueryType.find('moles2') > -1:
            if self._molesDir is None:
                self.createMolesFile()
                
            dir = self._molesDir
            
        # get the query and set this up to use properly
        
        #xquery = self._xq.actual(xQueryType, dir, self._repository_local_id, self._local_id)
        #SJD - added this bit in (missed?) to upgrade to ndgCommon.
        self.xqueryLib = ndgResources()       
        xquery = self.xqueryLib.createXQuery(xQueryType,dir, self._repository_local_id, self._local_id)
     
        # sort out the input ID stuff
        xquery=xquery.replace('Input_Entry_ID', self.discovery_id)
        xquery=xquery.replace('repository_localid', self._repository)

        # strip out the eXist reference to the libraries; these files should be available in the
        # running dir - as set up by oai_ingest.py
        xquery=xquery.replace('xmldb:exist:///db/xqueryLib/Vocabs/', '')
        xquery=xquery.replace('xmldb:exist:///db/xqueryLib/Utilities/', '')

        # write the query to file, to make it easier to input
        # NB, running directly at the command line leads to problems with the interpretation of $ characters
        xqFile = "currentQuery" + xQueryType + ".xq" 
        FileUtilities.createFile(xqFile, xquery)
        
        # ensure the jar file is available - NB, this may be running from a different
        # location - e.g. the OAIInfoEditor.lib.harvester - and this won't have the
        # saxon file directly on its filesystem
        jarFile = pkg_resources.resource_filename('OAIBatch', SAXON_JAR_FILE)

        # Now do the transform - shell out to saxon, redirecting stderr into the
        # captured output so any java errors are visible in 'output'
        os.putenv ('PATH', ':/usr/java/jdk1.5.0_03/bin:/usr/java/jdk1.5.0_03:/usr/java/jdk1.5.0_03/lib/tools.jar:/usr/local/WSClients/OAIBatch:/usr/local/exist-client/bin:/bin:/usr/bin:.')
        xqCommand = "java -cp %s net.sf.saxon.Query %s !omit-xml-declaration=yes" %(jarFile, xqFile)
        logging.debug("Running saxon command: " + xqCommand)
        pipe = os.popen(xqCommand + " 2>&1")
        output = pipe.read()
        status = pipe.close()

        # NB, popen close() returns None on a zero exit status
        if status is not None:
            raise SystemError, 'Failed at running the XQuery'

        # now remove the temp xquery file
        # NB(review): the clean-up below is deliberately disabled (triple-quoted, a no-op) -
        # the temp xquery file is left on disk
        '''status = os.unlink(xqFile)
        if status is not None:
            raise OSError, 'Failed to remove the temporary xquery file, ' + xqFile'''
        
        logging.info("Transform completed successfully")

        return output
[3809]225
[3816]226
227    def doMolesTransform(self):
[3800]228        '''
[3816]229        Set up the basic moles doc - according to the type of document first ingested
230        '''
231        logging.info("Creating moles document - for use with other transforms")
232        xqName = None
233        if self.docType == "DIF":
234            xqName = "dif2moles"
235        elif self.docType == "MDIP":
236            xqName = "mdip2moles"
237        else:
[3912]238            raise TypeError, "ERROR: No XQuery exists to transform input document type, %s, into moles format" \
239                     %self.docType
[3816]240
[3869]241        # now run the appropriate transform and set the attribute
242        setattr(self, "_molesFormat", self.doTransform(xqName))
243
[3816]244        # add keywords, if required
[4257]245        if self._datacentre_groups:
[3869]246            self.addKeywords()
[3860]247       
248        # escape any apostrophes
249        self._molesFormat = self.escapeSpecialCharacters(self._molesFormat)
250
[3816]251        logging.info("moles document created")
252       
253
254    def addKeywords(self):
255        '''
256        If datacentre groups have been specified, these need to be added as keywords
257        - NB, this is rather clumsy approach but uses old code to achieve the result
258        '''
259        logging.info("Adding datacentre keywords to moles file")
[3869]260
[3816]261        # NB, use temporary directories to do the keyword additions
[6133]262        tmpDir = self.baseDir + "/tmp/"
[3869]263        tmpKeywordsDir = os.getcwd() + "/keywordsAdded/"
[4423]264        FileUtilities.setUpDir(tmpDir)
265        FileUtilities.setUpDir(tmpKeywordsDir)
[3816]266        tmpFile = 'tmpFile.xml'
[4423]267        FileUtilities.createFile(tmpDir + tmpFile, self._molesFormat)
[3816]268
[3869]269        keywordAdder.main(tmpDir, tmpKeywordsDir, self._datacentre_groups)
[3816]270
271        # Now load in the converted file
272        f=open(tmpKeywordsDir + "/" + tmpFile, 'r')
273        self._molesFormat = f.read()
274        f.close
275       
276        # Finally, tidy up temp dirs
[4423]277        FileUtilities.cleanDir(tmpDir)
278        FileUtilities.cleanDir(tmpKeywordsDir)
[3816]279        logging.info("Completed adding keywords")
280       
281
282    def getDocumentFormat(self, docType):
283        '''
[3809]284        Lookup document format; if it is already defined then return it, else do the required XQuery
[3816]285        transform.  NB, transforms are ran on the molesFormat document - so ensure this is available
[3809]286        @param docType: format of document to return
[3800]287        '''
[3816]288        logging.info("Retrieving document type, " + docType)
[3809]289        xqName = {'DIF':'moles2dif', 'MOLES':'moles', 'DC':'moles2DC', 'MDIP':'moles2mdip', 'ISO19139':'moles2iso19139'}[docType]
290        attributeName = {'DIF':'_difFormat', 'MOLES':'_molesFormat', 'DC':'_dcFormat', 'MDIP':'_mdipFormat', 'ISO19139':'_iso19139Format'}[docType]
[3800]291       
[3816]292        # check we have the moles format available; if not create it
293        if self._molesFormat is None:
294            self.doMolesTransform()
[3821]295            self.createMolesFile()
[3816]296       
[3809]297        # check the document isn't already defined
[3816]298        try:
299            doc = getattr(self, attributeName)
300            if doc is not None:
301                logging.info("Found existing document - returning this now")
302                return doc
303        except:
[3821]304            logging.info("Document not available - creating new transformed document")
[3800]305
[3809]306        # the doc type doesn't exist - so run the xquery
[3862]307        transformedDoc = self.doTransform(xqName)
308        setattr(self, attributeName, transformedDoc)
309        return transformedDoc
[3809]310       
311   
[3816]312    def getAllDocs(self):
[3800]313        '''
[3809]314        Return a list of all the available doc types in the record
[3800]315        '''
[3862]316        # if the stored docs array is the same size as the array of all doc types
317        # assume all transforms have been done - and just return these
318        if len(self._allDocs) == len(self.documentTypes):
[3809]319            return self._allDocs
320       
[3846]321        for docType in self.documentTypes:
322            self._allDocs.append([docType, self.getDocumentFormat(docType)])
[3862]323
[3809]324        return self._allDocs
[3800]325       
[3862]326   
327    def getTemporalData(self):
328        '''
329        Retrieves the temporal data for the record; if this hasn't been discovered yet,
330        do the necessary parsing
331        @return: TimeRange object array with temporal data
332        '''
333        if self.stData is None:
334            self.getSpatioTemporalData()
[4257]335       
[3862]336        return self.stData.getTemporalData()
337       
338   
339    def getSpatialData(self):
340        '''
341        Retrieves the spatial data for the record; if this hasn't been discovered yet,
342        do the necessary parsing
343        @return: Coords object array with spatial data
344        '''
345        if self.stData is None:
346            self.getSpatioTemporalData()
347       
348        return self.stData.getSpatialData()
349       
[3809]350
[3839]351    def listify(self, item):
[3809]352        '''
353        listify checks if an item is a list, if it isn't it puts it
354        inside a list and returns it. Always returns a list object.
355        @param item: object to check
356        @return: item as a list object
357        '''
358        if type(item) is list:
359            return item
360        else:
361            return [item]
362       
[3800]363   
364    def getSpatioTemporalData(self):
365        '''
366        Extract spatio temporal data from the original document
367        '''
[3967]368        logging.info('Retrieving spatiotemporal info from moles file')
[3847]369        # initialise the various spatiotemporal arrays used to extract data to
370        self.stData = SpatioTemporalData()
[3800]371       
[3967]372        if self.dgMeta is None:
373            self.createMolesFile()
374           
[3847]375        # do quick checks to see if the relevant data exists
[3967]376        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary:
[3847]377            logging.info("No data summary elements found - assuming no spatiotemporal data available")
378            return
[3839]379       
[3967]380        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage:
[3847]381            logging.info("No data coverage elements found - assuming no spatiotemporal data available")
382            return
383       
[3967]384        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgSpatialCoverage:
[3847]385            logging.info("No spatial coverage elements found - assuming no spatial data available")
386        else:
[3967]387            self.getCoordData(self.dgMeta)
[3847]388
[4223]389        #SJD error with line below- this is where 23/09/08 edit in PostgresDAO fudge sorts...
[3967]390        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgTemporalCoverage:
[3847]391            logging.info("No temporal coverage elements found - assuming no temporal data available")
392        else:
[3967]393            self.getTimeRangeData(self.dgMeta)
[3847]394
[3967]395   
396    def getAuthorsInfo(self):
397        '''
398        Extract authors info from the moles file
399        '''
400        logging.info('Retrieving authors info from moles file')
401       
402        if self.dgMeta is None:
403            self.createMolesFile()
404           
405        logging.info("Extracting author info")
406        creators = ""
407        authors = ""
408        try:
[3972]409            # TODO: check this is the correct path for author data - NB, this is not obvious from example files
410            # nb, if this is correct, need to escape out the %20 and %3 characters else it doesn't work - see unescape.. fn
411            creators = self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataRoles.dgDataCreator.dgRoleHolder.dgMetadataID.localIdentifier
[3967]412            logging.info("Found creator information - adding this to authors record")
413           
414        except Exception, detail:
415            logging.info("Exception thrown whilst trying to find creator information:")
416            logging.info(detail)
417            logging.info("- this suggests document does not contain creator information.")
[3847]418
[3967]419        try:
420            authors = self.dgMeta.dgMetadataRecord.dgMetadataDescription.abstract.abstractOnlineReference.dgCitation.authors
421            logging.info("Found cited author information - adding this to authors record")
422           
423        except Exception, detail:
424            logging.info("Exception thrown whilst trying to find cited author information:")
425            logging.info(detail)
426            logging.info("- this suggests document does not contain cited author information.")
[3972]427       
[3967]428        self.authors = authors + " " + creators
429        return self.authors
430   
431   
432    def getParametersInfo(self):
433        '''
434        Extract parameters info from the moles file
435        '''
436        logging.info('Retrieving parameters info from moles file')
437       
438        if self.dgMeta is None:
439            self.createMolesFile()
440           
441        params = ""
442        try:
[3972]443            # TODO: check this is the correct path for parameters data - NB, this is not obvious from example files
444            parameters = self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgParameterSummary.dgStdParameterMeasured
445            parameters_list = self.listify(parameters)
446            for parameter in parameters_list:
447                if parameters.dgValidTerm:
448                    logging.info("Found parameter information - adding this to record")
449                    params += " " + parameters.dgValidTerm
[3967]450           
[3972]451           
[3967]452        except Exception, detail:
453            logging.info("Exception thrown whilst trying to find parameter information:")
454            logging.info(detail)
455            logging.info("- this suggests document does not contain parameter information.")
456       
457        self.parameters = params
458        return self.parameters
459   
460   
461    def getScopeInfo(self):
462        '''
463        Extract scope info from the moles file
464        '''
465        logging.info('Retrieving scope info from moles file')
466       
467        if self.dgMeta is None:
468            self.createMolesFile()
469           
470        scope = ""
471        try:
472            keywords = self.dgMeta.dgMetadataRecord.dgStructuredKeyword
473            logging.info("Found keyword information - parsing this for scope")
474
475            keywords_list = self.listify(keywords)
476            for keyword in keywords_list:
477                if keyword.dgValidTermID:
478                    if keyword.dgValidTermID.ParentListID.strip().startswith(self.ndg_data_provider_vocab):
479                        logging.info("Found scope value - adding this to record")
480                        scope += " " + keyword.dgValidTerm.strip()
481           
482        except Exception, detail:
483            logging.info("Exception thrown whilst trying to find scope information:")
484            logging.info(detail)
485            logging.info("- this suggests document does not contain scope information.")
[3972]486
487        # NB, to_tsvector will remove any underscores -leading to, e.g. NERC_DDC becoming tokenised as 'NERC' and 'DDC'
488        # - to avoid this, use the following delimiter
489        self.scope = re.sub(r'_', 'UNDERSCORE', scope)
[3967]490        return self.scope
491           
492           
[3847]493    def getTimeRangeData(self, dgMeta):
494        '''
495        Parse an xml tree and add any time range data found
496        @param dgMeta: xml fragment for the time range
497        '''
498        logging.info("Extracting time range info")
[3800]499        try:
[3847]500            dates = dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgTemporalCoverage.DateRange
501           
502            if not dates:
503                logging.info("No temporal info found for document")
504               
505            dates_list = self.listify(dates)
506            for date in dates_list:
507                startdate=date.DateRangeStart
508                enddate= date.DateRangeEnd
509                if startdate==None or startdate=='None':
[3853]510                    startdate="null"
[3847]511                if enddate==None or enddate=='None':
[3853]512                    enddate="null"
[3847]513                   
514                self.stData.addTimeRange(startdate, enddate)
515                logging.info("Temporal info: startdate " + \
516                             startdate + ", enddate " + enddate) 
[3839]517        except Exception, detail:
[3847]518            logging.info("Document does not contain temporal info.")
519            logging.info(detail)
[3800]520
[3847]521       
522    def getCoordData(self, dgMeta):
523        '''
524        Parse an xml tree and add any coord data found
525        @param dgMeta: xml fragment for the bounding boxes
526        '''
527        logging.info("Extracting bounding box info")
[3800]528        try:
529
[3847]530            bboxes = dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgSpatialCoverage.BoundingBox
531           
532            if not bboxes:
533                logging.info("No bounding box info found for document")
534                return
[4257]535               
[3847]536            bbox_list=self.listify(bboxes)
537            #parse the list of coordinates
538            for bbox in bbox_list:
539                north = self.parseCoord(bbox.LimitNorth, 'S', 'N')
540                south = self.parseCoord(bbox.LimitSouth, 'S', 'N')
541                east = self.parseCoord(bbox.LimitEast, 'W', 'E')
542                west = self.parseCoord(bbox.LimitWest, 'W', 'E')
543                self.stData.addCoords(north, south, east, west)
544                logging.info("Spatial info: west= " + west + ",south " + south + ", east " + \
545                    east + ", north " + north + "")
546               
547        except Exception, detail:
548            logging.warning("Problem encountered whilst parsing bounding box info - this may lead \n" + \
549                            "to an incomplete set of metadata being ingested. \nDetail: %s" %detail)
[3800]550
551
[3839]552    def parseCoord(self, coordValue, minField, maxField):
553        '''
554        Take a coordinate value extracted from a molefile bbox limit - together with
555        the appropriate max/min limits and extract the correct value from it
556        @param coordValue: the contents of the bbox limit tage
557        @param minField: the expected min field of the coord range - i.e. 'W' or 'S'
558        @param maxField: the expected max field of the coord range - i.e. 'E' or 'N'
559        @return: coord - the value of the coordinate as a string   
560        '''
[3847]561        logging.debug("Parsing document coordinates")
[3846]562        try:
563            coord = coordValue.strip()
564            if coord.endswith(maxField):
565                coord=coordValue.split(maxField)[0]
566            elif coord.endswith(minField):
567                if coord.startswith('-'):
568                    coord = coordValue.split(minField)[0]
569                else:
570                    coord = "-" + coordValue.split(minField)[0]
571   
572            return '%s' % float(coord)
573        except:
574            raise SyntaxError, 'Will not process File: contains incorrect bounding box limit: ' + coordValue
[3839]575
[3800]576           
[3809]577    def hasNullCoords():
[3839]578        '''
579        Checks a record to determine whether it has any coordinates set to null
580        '''
[3809]581        if str(self.west)=='null' or \
582            str(self.south)=='null' or \
583            str(self.east)=='null' or \
584            str(self.north)=='null':
585            return True;
586        else:
587            return False;
[3800]588       
Note: See TracBrowser for help on using the repository browser.