source: TI01-discovery/branches/ingestAutomation-upgrade/OAIBatch/PostgresRecord.py @ 5846

Subversion URL: http://proj.badc.rl.ac.uk/svn/ndg/TI01-discovery/branches/ingestAutomation-upgrade/OAIBatch/PostgresRecord.py@5846
Revision 5846, 24.1 KB checked in by sdonegan, 10 years ago (diff)

Fixed bug so handles special chars in dataset title and handling of invalid keyword args

RevLine 
[3800]1#!/usr/bin/env python
2'''
[3967]3Class representing a document to be ingested into the postgres DB table
[3800]4C Byrom Apr 08
5'''
[4854]6from xml.etree import cElementTree
[5248]7import os, sys, logging, re, pkg_resources
[5167]8import csml.csml2Moles.molesReadWrite as MRW
[4854]9from ndg.common.src.models.ndgObject import ndgObject
[5167]10from ndg.common.src.lib.ndgresources import ndgResources
[4854]11import ndg.common.src.lib.fileutilities as FileUtilities
[3847]12from SpatioTemporalData import SpatioTemporalData
[3869]13import keywordAdder
[3800]14
[5248]15SAXON_JAR_FILE = 'lib/saxon9.jar'
16
[3800]17class PostgresRecord:
18    '''
[3967]19    Class representing a document to be ingested into the postgres DB table
[3800]20    @param filename: Name of file to use a metadata record
[3967]21    @param ndg_dataprovider
22    @param datacentre_groups
23    @param datacentre_namespace
24    @param discovery_id
25    @param xq
26    @param doctype - type of doc to process
[3800]27    '''
[3912]28    # TODO MDIP transforms do not work very well for lots of files - so currently hiding these
[5252]29    documentTypes = ['MOLES', 'DIF', 'DC', 'ISO19139']#, 'MDIP']
[3967]30   
31    # vocab server - used for finding scope values in the moles files
32    ndg_data_provider_vocab = "http://vocab.ndg.nerc.ac.uk/term/N010"
[3809]33       
[5415]34    #def __init__(self, filename, ndg_dataprovider, datacentre_groups, datacentre_namespace, discovery_id, xq, docType):
[5524]35    def __init__(self, filename, ndg_dataprovider, datacentre_groups, datacentre_namespace, discovery_id,datasetName,datacentreName,datasetLastEditUpdateDate,datasetStartDateNom, datasetEndDateNom, xq, docType):
[5415]36                 
[3816]37        logging.info("Setting up Postgres record for file, " + filename)
38        self.filename = filename
[3821]39   
40        # NB, if we're dealing with an NDG data provider, the details are slightly different
41        if ndg_dataprovider:
42            discObj=ndgObject(discovery_id)
43            self._local_id = discObj.localID
44            self._repository_local_id = discObj.repository
45        else:
46            self._local_id = discovery_id
47            self._repository_local_id = datacentre_namespace
48           
[3816]49        self._datacentre_groups = datacentre_groups
[3809]50        self._repository = datacentre_namespace
51        self.discovery_id = discovery_id
52        self._xq = xq
[5252]53        # simplify processing by uppercasing format at initialisation
54        self.docType = docType.upper()   
[5415]55       
[5846]56        #make sure we escape any special characters in this field... SJD 20/10/09       
57        self.dataset_name = self.escapeSpecialCharacters(datasetName)
58        self.datacentre_name = self.escapeSpecialCharacters(datacentreName)
59       
60        self.dataset_lastEdit = datasetLastEditUpdateDate   
[5464]61        self.datasetStartNom = datasetStartDateNom
[5524]62        self.datasetEndNom = datasetEndDateNom
[3816]63
64        self._molesFormat = None    # initialise this, so we can guarantee a value - to avoid using getattr
[3809]65        self._allDocs = []  # array to store all the transformed docs - for easy retrieval by the DAO
66
[3821]67        # get the dir of the file - needed by the xquery to use as the target collection
68        tmp = filename.split('/')
69        self._dir = '/'.join(tmp[0:len(tmp)-1])
[4854]70        self.shortFilename = tmp[-1]
[3821]71       
72        # dir to store a temp copy of the moles file, when produced - for use by other transforms
73        self._molesDir = None
[3967]74        # object to hold the moles file - this will be loaded in when it is created - in order to extract
75        # spatiotemporal data, etc
76        self.dgMeta = None
[3821]77
[3800]78        # firstly load contents of file
[3809]79        self.originalFormat = file(filename).read()
80       
[3860]81        # escape any apostrophes
82        self.originalFormat = self.escapeSpecialCharacters(self.originalFormat)
83
[3800]84        # initialise the various record fields
85        self.db_id = None    # the DB ID of the record, for easy reference when it is created
[3809]86        self.molesFormat = None
87        self.dcFormat = None
88        self.mdipFormat = None
89        self.iso19139Format = None
[3853]90        self.scn = 1    # system change number - keeps track of number of mods to a particular row
[3809]91       
[3862]92        # spatiotemporal data object
93        self.stData = None
[3967]94       
95        # fields to hold author, parameter and scope data
96        self.authors = None
97        self.parameters = None
98        self.scope = None
[3800]99
[3860]100    def escapeSpecialCharacters(self, inputString):
101        '''
102        Adjust the input string to escape any characters that would interfere with string or DB
103        operations
104        @param inputString: string to correct
105        @return: corrected string
106        '''
107        return re.sub(r'\'', '\\\'', inputString)
[3972]108
109
110    def unescapeSpecialCharacters(self, inputString):
111        '''
112        Adjust the input string to remove escaped characters that would interfere with string or DB
113        operations
114        @param inputString: string to correct
115        @return: corrected string
116        '''
117        str = re.sub(r'%20', ' ', inputString)
118        return 
[3816]119   
[3860]120   
[3816]121    def doRecordTransforms(self):
122        '''
123        Run various transforms on the original doc, to populate the record with
124        the other types of doc used elsewhere
125        '''
126        logging.info("Running transforms for all document types")
127        for docType in self.documentTypes:
128            self.getDocumentFormat(docType)
[3821]129           
[3816]130        logging.info("Transforms complete")
[3800]131
[3816]132
    def createMolesFile(self):
        '''
        Check if a moles file exists on the system; if not, assume the moles transform has not
        been ran and then produce this file - to allow for use in the various xqueries.
        Also parses the file into self.dgMeta for later extraction of spatiotemporal,
        author and parameter data.
        @raise SystemError: if the produced moles document cannot be parsed as XML
        '''
        logging.info("Creating moles file on system - for use with other xquery transforms")
        # the moles copy lives in a 'moles' subdir alongside the original doc
        self._molesDir = self._dir + "/moles/"
        FileUtilities.setUpDir(self._molesDir)

        # run the moles transform first, if it hasn't been done already
        if self._molesFormat is None:
            self.doMolesTransform()

        FileUtilities.createFile(self._molesDir + self.shortFilename, self._molesFormat)
        logging.info("Moles file created - at %s" %self._molesDir)

        # now load this moles file, for use when parsing out spatiotemporal, author and parameters data later on
        molesFile = self._molesDir + self.shortFilename
        logging.info('Retrieving spatiotemporal info from moles file, %s' %molesFile)

        # load in the moles file and put this into an object for direct access to the xml elements
        self.dgMeta=MRW.dgMetadata()
        try:
            self.dgMeta.fromXML(cElementTree.ElementTree(file=molesFile).getroot())
        except Exception, detail:
            raise SystemError, 'Cannot parse the XML moles document %s. Detail:\n%s' %(molesFile, detail)
159
[3821]160
    def doTransform(self, xQueryType):
        '''
        Transform the record according to the specified XQuery type
        @param xQueryType: XQuery doc to use to do the transform
        @return: the metadata record in the required transformed format
        @raise SystemError: if the external saxon XQuery command fails
        '''
        logging.info("Running XQuery transform, " + xQueryType + " to create transformed document")

        # firstly, check if this is a moles -> something else query; if so, ensure there is a valid
        # moles file available for the transform - and use the correct dir for the xquery collection
        dir = self._dir
        if xQueryType.find('moles2') > -1:
            if self._molesDir is None:
                self.createMolesFile()

            dir = self._molesDir

        # get the query and set this up to use properly

        #xquery = self._xq.actual(xQueryType, dir, self._repository_local_id, self._local_id)
        #SJD - added this bit in (missed?) to upgrade to ndgCommon.
        self.xqueryLib = ndgResources()
        xquery = self.xqueryLib.createXQuery(xQueryType,dir, self._repository_local_id, self._local_id)

        # sort out the input ID stuff
        xquery=xquery.replace('Input_Entry_ID', self.discovery_id)
        xquery=xquery.replace('repository_localid', self._repository)

        # strip out the eXist reference to the libraries; these files should be available in the
        # running dir - as set up by oai_ingest.py
        xquery=xquery.replace('xmldb:exist:///db/xqueryLib/Vocabs/', '')
        xquery=xquery.replace('xmldb:exist:///db/xqueryLib/Utilities/', '')

        # write the query to file, to make it easier to input
        # NB, running directly at the command line leads to problems with the interpretation of $ characters
        xqFile = "currentQuery" + xQueryType + ".xq" 
        FileUtilities.createFile(xqFile, xquery)

        # ensure the jar file is available - NB, this may be running from a different
        # location - e.g. the OAIInfoEditor.lib.harvester - and this won't have the
        # saxon file directly on its filesystem
        jarFile = pkg_resources.resource_filename('OAIBatch', SAXON_JAR_FILE)

        # Now do the transform
        # NOTE(review): PATH here is hardcoded to a specific JDK install - confirm
        # this is still valid for the deployment environment
        os.putenv ('PATH', ':/usr/java/jdk1.5.0_03/bin:/usr/java/jdk1.5.0_03:/usr/java/jdk1.5.0_03/lib/tools.jar:/usr/local/WSClients/OAIBatch:/usr/local/exist-client/bin:/bin:/usr/bin:.')
        xqCommand = "java -cp %s net.sf.saxon.Query %s !omit-xml-declaration=yes" %(jarFile, xqFile)
        logging.debug("Running saxon command: " + xqCommand)
        # stderr is merged into the output via '2>&1'; a non-None close status
        # from os.popen indicates the command exited with an error
        pipe = os.popen(xqCommand + " 2>&1")
        output = pipe.read()
        status = pipe.close()

        if status is not None:
            raise SystemError, 'Failed at running the XQuery'

        # now remove the temp xquery file
        '''status = os.unlink(xqFile)
        if status is not None:
            raise OSError, 'Failed to remove the temporary xquery file, ' + xqFile'''

        logging.info("Transform completed successfully")

        return output
[3809]223
[3816]224
225    def doMolesTransform(self):
[3800]226        '''
[3816]227        Set up the basic moles doc - according to the type of document first ingested
228        '''
229        logging.info("Creating moles document - for use with other transforms")
230        xqName = None
231        if self.docType == "DIF":
232            xqName = "dif2moles"
233        elif self.docType == "MDIP":
234            xqName = "mdip2moles"
235        else:
[3912]236            raise TypeError, "ERROR: No XQuery exists to transform input document type, %s, into moles format" \
237                     %self.docType
[3816]238
[3869]239        # now run the appropriate transform and set the attribute
240        setattr(self, "_molesFormat", self.doTransform(xqName))
241
[3816]242        # add keywords, if required
[4257]243        if self._datacentre_groups:
[3869]244            self.addKeywords()
[3860]245       
246        # escape any apostrophes
247        self._molesFormat = self.escapeSpecialCharacters(self._molesFormat)
248
[3816]249        logging.info("moles document created")
250       
251
252    def addKeywords(self):
253        '''
254        If datacentre groups have been specified, these need to be added as keywords
255        - NB, this is rather clumsy approach but uses old code to achieve the result
256        '''
257        logging.info("Adding datacentre keywords to moles file")
[3869]258
[3816]259        # NB, use temporary directories to do the keyword additions
[3869]260        tmpDir = os.getcwd() + "/tmp/"
261        tmpKeywordsDir = os.getcwd() + "/keywordsAdded/"
[4423]262        FileUtilities.setUpDir(tmpDir)
263        FileUtilities.setUpDir(tmpKeywordsDir)
[3816]264        tmpFile = 'tmpFile.xml'
[4423]265        FileUtilities.createFile(tmpDir + tmpFile, self._molesFormat)
[3816]266
[3869]267        keywordAdder.main(tmpDir, tmpKeywordsDir, self._datacentre_groups)
[3816]268
269        # Now load in the converted file
270        f=open(tmpKeywordsDir + "/" + tmpFile, 'r')
271        self._molesFormat = f.read()
272        f.close
273       
274        # Finally, tidy up temp dirs
[4423]275        FileUtilities.cleanDir(tmpDir)
276        FileUtilities.cleanDir(tmpKeywordsDir)
[3816]277        logging.info("Completed adding keywords")
278       
279
280    def getDocumentFormat(self, docType):
281        '''
[3809]282        Lookup document format; if it is already defined then return it, else do the required XQuery
[3816]283        transform.  NB, transforms are ran on the molesFormat document - so ensure this is available
[3809]284        @param docType: format of document to return
[3800]285        '''
[3816]286        logging.info("Retrieving document type, " + docType)
[3809]287        xqName = {'DIF':'moles2dif', 'MOLES':'moles', 'DC':'moles2DC', 'MDIP':'moles2mdip', 'ISO19139':'moles2iso19139'}[docType]
288        attributeName = {'DIF':'_difFormat', 'MOLES':'_molesFormat', 'DC':'_dcFormat', 'MDIP':'_mdipFormat', 'ISO19139':'_iso19139Format'}[docType]
[3800]289       
[3816]290        # check we have the moles format available; if not create it
291        if self._molesFormat is None:
292            self.doMolesTransform()
[3821]293            self.createMolesFile()
[3816]294       
[3809]295        # check the document isn't already defined
[3816]296        try:
297            doc = getattr(self, attributeName)
298            if doc is not None:
299                logging.info("Found existing document - returning this now")
300                return doc
301        except:
[3821]302            logging.info("Document not available - creating new transformed document")
[3800]303
[3809]304        # the doc type doesn't exist - so run the xquery
[3862]305        transformedDoc = self.doTransform(xqName)
306        setattr(self, attributeName, transformedDoc)
307        return transformedDoc
[3809]308       
309   
[3816]310    def getAllDocs(self):
[3800]311        '''
[3809]312        Return a list of all the available doc types in the record
[3800]313        '''
[3862]314        # if the stored docs array is the same size as the array of all doc types
315        # assume all transforms have been done - and just return these
316        if len(self._allDocs) == len(self.documentTypes):
[3809]317            return self._allDocs
318       
[3846]319        for docType in self.documentTypes:
320            self._allDocs.append([docType, self.getDocumentFormat(docType)])
[3862]321
[3809]322        return self._allDocs
[3800]323       
[3862]324   
325    def getTemporalData(self):
326        '''
327        Retrieves the temporal data for the record; if this hasn't been discovered yet,
328        do the necessary parsing
329        @return: TimeRange object array with temporal data
330        '''
331        if self.stData is None:
332            self.getSpatioTemporalData()
[4257]333       
[3862]334        return self.stData.getTemporalData()
335       
336   
337    def getSpatialData(self):
338        '''
339        Retrieves the spatial data for the record; if this hasn't been discovered yet,
340        do the necessary parsing
341        @return: Coords object array with spatial data
342        '''
343        if self.stData is None:
344            self.getSpatioTemporalData()
345       
346        return self.stData.getSpatialData()
347       
[3809]348
[3839]349    def listify(self, item):
[3809]350        '''
351        listify checks if an item is a list, if it isn't it puts it
352        inside a list and returns it. Always returns a list object.
353        @param item: object to check
354        @return: item as a list object
355        '''
356        if type(item) is list:
357            return item
358        else:
359            return [item]
360       
[3800]361   
    def getSpatioTemporalData(self):
        '''
        Extract spatio temporal data from the original document
        - initialises self.stData and delegates to getCoordData/getTimeRangeData
        '''
        logging.info('Retrieving spatiotemporal info from moles file')
        # initialise the various spatiotemporal arrays used to extract data to
        self.stData = SpatioTemporalData()

        # ensure the moles file has been parsed into self.dgMeta first
        if self.dgMeta is None:
            self.createMolesFile()

        # do quick checks to see if the relevant data exists
        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary:
            logging.info("No data summary elements found - assuming no spatiotemporal data available")
            return

        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage:
            logging.info("No data coverage elements found - assuming no spatiotemporal data available")
            return

        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgSpatialCoverage:
            logging.info("No spatial coverage elements found - assuming no spatial data available")
        else:
            self.getCoordData(self.dgMeta)

        #SJD error with line below- this is where 23/09/08 edit in PostgresDAO fudge sorts...
        if not self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgTemporalCoverage:
            logging.info("No temporal coverage elements found - assuming no temporal data available")
        else:
            self.getTimeRangeData(self.dgMeta)
[3847]392
[3967]393   
    def getAuthorsInfo(self):
        '''
        Extract authors info from the moles file
        @return: single string combining cited authors and creators, space separated
        '''
        logging.info('Retrieving authors info from moles file')

        # ensure the moles file has been parsed into self.dgMeta first
        if self.dgMeta is None:
            self.createMolesFile()

        logging.info("Extracting author info")
        creators = ""
        authors = ""
        try:
            # TODO: check this is the correct path for author data - NB, this is not obvious from example files
            # nb, if this is correct, need to escape out the %20 and %3 characters else it doesn't work - see unescape.. fn
            creators = self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataRoles.dgDataCreator.dgRoleHolder.dgMetadataID.localIdentifier
            logging.info("Found creator information - adding this to authors record")

        except Exception, detail:
            # a missing element anywhere in the chain above lands here - treated as "no creator info"
            logging.info("Exception thrown whilst trying to find creator information:")
            logging.info(detail)
            logging.info("- this suggests document does not contain creator information.")

        try:
            authors = self.dgMeta.dgMetadataRecord.dgMetadataDescription.abstract.abstractOnlineReference.dgCitation.authors
            logging.info("Found cited author information - adding this to authors record")

        except Exception, detail:
            logging.info("Exception thrown whilst trying to find cited author information:")
            logging.info(detail)
            logging.info("- this suggests document does not contain cited author information.")

        # combine both sources into a single space separated string
        self.authors = authors + " " + creators
        return self.authors
428   
429   
430    def getParametersInfo(self):
431        '''
432        Extract parameters info from the moles file
433        '''
434        logging.info('Retrieving parameters info from moles file')
435       
436        if self.dgMeta is None:
437            self.createMolesFile()
438           
439        params = ""
440        try:
[3972]441            # TODO: check this is the correct path for parameters data - NB, this is not obvious from example files
442            parameters = self.dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgParameterSummary.dgStdParameterMeasured
443            parameters_list = self.listify(parameters)
444            for parameter in parameters_list:
445                if parameters.dgValidTerm:
446                    logging.info("Found parameter information - adding this to record")
447                    params += " " + parameters.dgValidTerm
[3967]448           
[3972]449           
[3967]450        except Exception, detail:
451            logging.info("Exception thrown whilst trying to find parameter information:")
452            logging.info(detail)
453            logging.info("- this suggests document does not contain parameter information.")
454       
455        self.parameters = params
456        return self.parameters
457   
458   
    def getScopeInfo(self):
        '''
        Extract scope info from the moles file
        @return: string of scope terms, with underscores replaced by 'UNDERSCORE'
        '''
        logging.info('Retrieving scope info from moles file')

        # ensure the moles file has been parsed into self.dgMeta first
        if self.dgMeta is None:
            self.createMolesFile()

        scope = ""
        try:
            keywords = self.dgMeta.dgMetadataRecord.dgStructuredKeyword
            logging.info("Found keyword information - parsing this for scope")

            keywords_list = self.listify(keywords)
            for keyword in keywords_list:
                # only keywords drawn from the NDG data provider vocab count as scope values
                if keyword.dgValidTermID:
                    if keyword.dgValidTermID.ParentListID.strip().startswith(self.ndg_data_provider_vocab):
                        logging.info("Found scope value - adding this to record")
                        scope += " " + keyword.dgValidTerm.strip()

        except Exception, detail:
            logging.info("Exception thrown whilst trying to find scope information:")
            logging.info(detail)
            logging.info("- this suggests document does not contain scope information.")

        # NB, to_tsvector will remove any underscores -leading to, e.g. NERC_DDC becoming tokenised as 'NERC' and 'DDC'
        # - to avoid this, use the following delimiter
        self.scope = re.sub(r'_', 'UNDERSCORE', scope)
        return self.scope
489           
490           
[3847]491    def getTimeRangeData(self, dgMeta):
492        '''
493        Parse an xml tree and add any time range data found
494        @param dgMeta: xml fragment for the time range
495        '''
496        logging.info("Extracting time range info")
[3800]497        try:
[3847]498            dates = dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgTemporalCoverage.DateRange
499           
500            if not dates:
501                logging.info("No temporal info found for document")
502               
503            dates_list = self.listify(dates)
504            for date in dates_list:
505                startdate=date.DateRangeStart
506                enddate= date.DateRangeEnd
507                if startdate==None or startdate=='None':
[3853]508                    startdate="null"
[3847]509                if enddate==None or enddate=='None':
[3853]510                    enddate="null"
[3847]511                   
512                self.stData.addTimeRange(startdate, enddate)
513                logging.info("Temporal info: startdate " + \
514                             startdate + ", enddate " + enddate) 
[3839]515        except Exception, detail:
[3847]516            logging.info("Document does not contain temporal info.")
517            logging.info(detail)
[3800]518
[3847]519       
    def getCoordData(self, dgMeta):
        '''
        Parse an xml tree and add any coord data found to self.stData
        @param dgMeta: xml fragment for the bounding boxes
        '''
        logging.info("Extracting bounding box info")
        try:

            bboxes = dgMeta.dgMetadataRecord.dgDataEntity.dgDataSummary.dgDataCoverage.dgSpatialCoverage.BoundingBox

            if not bboxes:
                logging.info("No bounding box info found for document")
                return

            bbox_list=self.listify(bboxes)
            #parse the list of coordinates
            for bbox in bbox_list:
                # convert the hemisphere-suffixed limits (e.g. '10N', '5W') into signed values
                north = self.parseCoord(bbox.LimitNorth, 'S', 'N')
                south = self.parseCoord(bbox.LimitSouth, 'S', 'N')
                east = self.parseCoord(bbox.LimitEast, 'W', 'E')
                west = self.parseCoord(bbox.LimitWest, 'W', 'E')
                self.stData.addCoords(north, south, east, west)
                logging.info("Spatial info: west= " + west + ",south " + south + ", east " + \
                    east + ", north " + north + "")

        except Exception, detail:
            # NB, a malformed limit raises SyntaxError from parseCoord, which is
            # swallowed here - so ingest continues with whatever was parsed so far
            logging.warning("Problem encountered whilst parsing bounding box info - this may lead \n" + \
                            "to an incomplete set of metadata being ingested. \nDetail: %s" %detail)
[3800]548
549
[3839]550    def parseCoord(self, coordValue, minField, maxField):
551        '''
552        Take a coordinate value extracted from a molefile bbox limit - together with
553        the appropriate max/min limits and extract the correct value from it
554        @param coordValue: the contents of the bbox limit tage
555        @param minField: the expected min field of the coord range - i.e. 'W' or 'S'
556        @param maxField: the expected max field of the coord range - i.e. 'E' or 'N'
557        @return: coord - the value of the coordinate as a string   
558        '''
[3847]559        logging.debug("Parsing document coordinates")
[3846]560        try:
561            coord = coordValue.strip()
562            if coord.endswith(maxField):
563                coord=coordValue.split(maxField)[0]
564            elif coord.endswith(minField):
565                if coord.startswith('-'):
566                    coord = coordValue.split(minField)[0]
567                else:
568                    coord = "-" + coordValue.split(minField)[0]
569   
570            return '%s' % float(coord)
571        except:
572            raise SyntaxError, 'Will not process File: contains incorrect bounding box limit: ' + coordValue
[3839]573
[3800]574           
[3809]575    def hasNullCoords():
[3839]576        '''
577        Checks a record to determine whether it has any coordinates set to null
578        '''
[3809]579        if str(self.west)=='null' or \
580            str(self.south)=='null' or \
581            str(self.east)=='null' or \
582            str(self.north)=='null':
583            return True;
584        else:
585            return False;
[3800]586       
Note: See TracBrowser for help on using the repository browser.