source: TI03-DataExtractor/branches/old_stuff/badc_extract.py @ 793

Subversion URL: http://proj.badc.rl.ac.uk/svn/ndg/TI03-DataExtractor/branches/old_stuff/badc_extract.py@793
Revision 793, 10.0 KB checked in by astephen, 13 years ago (diff)

Put all the old code in the old_stuff branch.

  • Property svn:executable set to *
Line 
1#!/usr/local/cdat/bin/python
2
3"""
4extract.py
5==========
6
7Main module for the extractor package.
8
9This module holds the main function that is used
10to control a user request and data extraction.
11
12Version history
13===============
14
15Version 1.0:  - 01/03/2004
16              - Ag Stephens, British Atmospheric Data Centre.
17              - First version.
18
19"""
20
21# Import required modules
22import string
23import sys
24# Add your location path for the dx module
25sys.path.append("/usr/local")
26#sys.path.insert(0, "/usr/local/cdat/lib")
27#sys.path.insert(0, "/usr/local/cdat/lib/python2.2")
28#sys.path.insert(0, "/usr/local/cdat/lib/python2.2/site-packages")
29#sys.path.insert(0, "/usr/local/cdat/lib/python2.2/site-packages/Numeric")
30sys.path.insert(0, "/usr/local/badcdat/lib")
31sys.path.insert(0, "/usr/local/badcdat/lib/python")
32
33import os
34os.environ["HOME"]="/home/tornado/internal/badc"
35#os.environ["PATH"]=os.environ["PATH"]+":/usr/local/grads/bin:/usr/local/cdat/bin:/usr/local/badcdat/bin"
36#os.system("unset ECMWF_LOCAL_TABLE_PATH ; unset ECMWF_LOCAL_TABLE_PATH")
37
38# Bring DataExtractor package into local scope
39from dx_badc import *
40
41import time
42import cgi
43import commands
44
45# Set up cgi error reporting to screen
46import cgitb
47cgitb.enable()
48
49# Set global variables
50
def exitNicely(message):
    """Placeholder for a graceful-exit handler; currently a no-op.

    message -- text that would presumably be reported to the user on
               exit (unused -- NOTE(review): not yet implemented).
    """
    pass
53
54
def displayError(error):
    """Write a minimal one-line HTML error page to stdout.

    error -- error message text; it is interpolated into the page body
             verbatim, so it should already be HTML-safe (no escaping
             is performed here).
    """
    # Fixed malformed markup: the original emitted "<HEAD><HEAD>" (a second
    # unclosed opening tag) where "</HEAD>" was clearly intended.
    # sys.stdout.write with an explicit "\n" reproduces exactly what the old
    # Python 2 print statement emitted.
    sys.stdout.write(
        "<HTML><HEAD></HEAD><BODY><H2>ERROR:</H2><B>%s</B></BODY></HTML>\n"
        % error)
58
def main(args):
    """Top-level controller for one Data Extractor request.

    args -- dict of request parameters, or None; when None the parameters
            are harvested from the CGI form instead (web invocation).

    Drives the whole request cycle: login/security check, request-record
    creation or reload, dataset resolution, page dispatch and -- on the
    final page -- the actual extraction (NetCDF, plot or GRIB), writing
    HTML to stdout throughout.  Names such as restricted_data, Security,
    UserInterface, Requestdb, Datasetdb, pages, datasets and subsets come
    from the "from dx_badc import *" at the top of this file.
    """

    username=None
    allowed_groups=None
    response=None
    #print "Content-Type: text/html\n\n"

    request={}
    # No explicit args: CGI invocation -- collect the submitted form fields.
    if args==None:
        args={}
        form=cgi.FieldStorage()

        for key in form.keys():
            args[key]=form.getvalue(key)
            #print "%s::%s" % (key, args[key])
    # Get security from configuration script conf.py
    #### OVERRIDE SECURITY FOR NOW ####

#    print "Content-Type: text/html\n\n"
#    print args

    # Authentication is only enforced when dx_badc flags the data restricted.
    if restricted_data==1:
        sec=Security(args)

        if args.has_key("logout"):
            # logout
            args={}
            sec.logout()
            allowed_groups=None
            #loginStatus=None

        else:
            response=sec.getLoginStatus()
            #print response
            if response:
                username=response[0]
                allowed_groups=response[1]
                #loginStatus=1
                #print "<H1>", allowed_groups, "</H1>"
                #print "<H1>", username, "</H1>"
            else:
                # Not logged in: bail out here.  NOTE(review): presumably
                # getLoginStatus() has already emitted a login page --
                # confirm against Security.getLoginStatus().
                return

    else:
        allowed_groups=""

    ui=UserInterface()

    # If first page then don't create a request
    if args=={}:
        pass 
        # NOT YET IMPLEMENTED       

    # args now holds the input arguments (from CGI or command line)

    # Strip navigation-only fields so they never reach the request record.
    for key in ("_gohome",):
        if args.has_key(key):  del args[key]

    # New request: mint an id and seed defaults.  Existing request: reload
    # its saved state from the request database.
    if not args.has_key("req_id"):
        requester=Requestdb()
        request["req_id"]=requester.getRequestID()
        #request["user"]=username
        request["allowed_groups"]=allowed_groups
        request["target_page"]=pages[0]
        request["num_datasets"]="1"
        #request["loginStatus"]=loginStatus
    else:
        requester=Requestdb(args["req_id"])
        request=requester.readRequest()

    if not request.has_key("user"):
        request["user"]=username

    # Incoming arguments override anything loaded from the saved request.
    for key in args.keys():
        request[key]=args[key]

    # A reloaded request apparently stores allowed_groups as the string
    # repr of a list (e.g. "['a', 'b']"); strip the brackets/quotes to
    # recover a real list of group names.
    if request.has_key("allowed_groups"):
        if type(request["allowed_groups"])==str:
            allowed_string=request["allowed_groups"].replace(" ","")
            allowed_as_list=allowed_string[1:-1].split(",")
            cleaned_list=map(lambda x: x.replace("'", ""), allowed_as_list)
            request["allowed_groups"]=cleaned_list

    if request.has_key("datasetURI_2"):  request["num_datasets"]="2"

    # Resolve each submitted dataset URI to its (dataset, subset) names via
    # the dataset database; URIs not found there are treated as
    # user-supplied datasets and skip the group security check below.
    for dset_num in range(1, int(request["num_datasets"])+1):
        if request.has_key("datasetURI_%s" % dset_num) and not request.has_key("dataset_%s" % dset_num):
            dsetdb=Datasetdb()
            URI_list=dsetdb.getSubsetURIList()
            for i in URI_list:
                if request["datasetURI_%s" % dset_num]==i[0]: # if it is a known dataset
                    request["dataset_%s" % dset_num]=i[2]
                    request["subset_%s" %dset_num]=i[1]
            if not request.has_key("dataset_%s" % dset_num):
                request["dataset_%s" % dset_num]="User dataset"
                request["subset_%s" % dset_num]="User subset"
                # Note if it is not known then no security check needed.         
            request["target_page"]="VariablesPage"

    # Single-dataset requests must not carry stale second-dataset fields.
    if request.has_key("num_datasets") and request["num_datasets"]=="1":
        for key in ("dataset_2", "subset_2", "variable_2"):
            if request.has_key(key):  del request[key]

    if request.has_key("target_page"):
        target_page=request["target_page"]
    else:
        target_page="DatasetPage"

    # Extra security check
    # NOTE(review): raising a bare string only works on very old Python 2
    # interpreters (string exceptions were later removed); this should
    # raise a proper Exception subclass.
    for dset in ("dataset_1", "dataset_2"):
        if request.has_key(dset):
            if request[dset]=="User dataset" or datasets[request[dset]]==None:
                pass
            elif datasets[request[dset]] not in request["allowed_groups"] and datasets[request[dset]]!=None:
                raise "You do not have permission to access the requested data!" 

    # NOTE(review): if target_page were empty/None, "stage" is never bound
    # yet execution falls through to ui.writePage(request, stage) below,
    # which would raise NameError -- displayError() does not stop the run.
    if target_page:
        stage=pages.index(target_page)
    else:
        displayError("Target page not recognised; %s" % target_page)

    # Persist the (possibly updated) request with a fresh access time.
    if request.has_key("dataset_1") or request.has_key("req_id"):
        request["access_time"]=time.time()
        requester.writeRequest(request)

    # Every page except the final one (pages[4]) is simply rendered; the
    # final page performs the actual extraction.
    if target_page!=pages[4]:
        ui.writePage(request, stage)
    else:                         #  if target_page==pages[4]:
        ui.writeHeader()
        ui.writeRequestInfo(request, stage)
        sys.stdout.flush()

        print "<P><B>Executing request...</B><P>"
        sys.stdout.flush()
        # Roundabout method of getting subset xml file out of list of lists.
        source_xmls=[]
        for dset_num in range(1, int(request["num_datasets"])+1):
            if request["dataset_%s" % dset_num]=="User dataset":
                source_xmls.append(request["datasetURI_%s" % dset_num])
            else:
                for sub in subsets[request["dataset_%s" % dset_num]]:
                    if sub[0]==request["subset_%s" % dset_num]:
                        source_xmls.append(sub[1])


        if request["output_type"]=="NetCDF":
            data_producer=DataFile(source_xmls, request)
            # Dry run first: estimate how long the extraction will take.
            estimatedDuration=data_producer.getParam(estimateDuration="yes")

            # Long jobs (> 60 s): warn the user and promise an email.
            if estimatedDuration>60:
                emailAddr=accountUtils.getUserEmail(request["user"])
                print """<P>Your extraction job has been submitted and is estimated to take about %s seconds.
<P>You will be emailed at %s when the job has finished and the output is ready.<P>Thank you for using the %s.""" % (int(estimatedDuration), emailAddr, package_name)
                print "<P>If you remain on this page it should eventually produce your output but the server may time out if your request is large.<P>"
            # Now really get data...
            data_producer.getParam()
            (outfile, outfile_actual)=data_producer.process()
            # For long jobs, mail the download link; RedirectStdout()
            # diverts anything MailUser prints away from the HTML page.
            if estimatedDuration>60:
                stdout=sys.stdout
                sys.stdout=RedirectStdout() 
                mail=MailUser(emailAddr, "Message from %s" % package_name)
                mail.sendMail("""Your extraction job has completed, you may collect the data from:\n
%s\n\nThank you for using this service.\n If you have any comments or feedback please send them to %s.""" % (os.path.join(web_output_basedir, outfile[1:]), admin_mail_address))
                sys.stdout=stdout
            print "<P>Request processed...<P>"
            print '<A HREF="%s">Click here to download file</A>.<P>' % outfile
            print '<A HREF="%s?fileURI=%s">Click here to visualise your NetCDF file</A>.<P>' % (visualisor, outfile_actual)

        elif request["output_type"]=="GIF (Plot)":
            # Same flow as NetCDF above, but via the Plot producer, whose
            # process() returns a (plotfile, ncfile) pair.
            data_producer=Plot(source_xmls, request)
            estimatedDuration=data_producer.getParam(estimateDuration="yes")

            if estimatedDuration>60:
                emailAddr=accountUtils.getUserEmail(request["user"])
                print """<P>Your extraction job has been submitted and is estimated to take about %s seconds. 
<P>You will be emailed at %s when the job has finished and the output is ready.<P>Thankyou for using the %s.""" % (int(estimatedDuration), emailAddr, package_name)
                print "<P>If you remain on this page it should eventually produce your output but the server may time out if your request is large.<P>"
            # Now really get data...
            data_producer.getParam()
            (plotfile, ncfile)=data_producer.process()
            if estimatedDuration>60:
                stdout=sys.stdout
                sys.stdout=RedirectStdout()
                mail=MailUser(emailAddr, "Message from %s" % package_name)
                mail.sendMail("""Your extraction job has completed, you may collect the data or plot from:\n
%s\n%s\n\nThank you for using this service.\n\n If you have any comments or feedback please send them to %s.""" % (os.path.join(web_output_basedir, ncfile[1:]), os.path.join(web_output_basedir, plotfile[1:]), admin_mail_address))
                sys.stdout=stdout
            print "<P>Request processed...<P>"
            print """<IMG SRC="%s" BORDER="1">
<P><A HREF="%s">Click here to download plot</A>.<P>
<P><A HREF="%s">Click here to download data (NetCDF)</A>.<P>""" % (plotfile, plotfile, ncfile)

        elif request["output_type"]=="GRIB":
            # GRIB output: no duration estimate or email notification.
            data_producer=GribFile(source_xmls, request)
            outfile=data_producer.process()
            print "Request processed...<P>"
            print '<A HREF="%s">Click here to download file</A>.<P>' % outfile
        else:
            # Unrecognised output type: emit nothing, just the footer below.
            pass
        ui.writeFooter()
        return
260   
261
262
263       
if __name__=="__main__":
    # Command-line invocation: each "key=value" argument becomes a request
    # parameter; with no arguments, pass None so main() falls back to
    # reading the CGI form instead.
    args={}

    if len(sys.argv)>1:   # use command line args to test
        for arg in sys.argv[1:]:
            # Split on the first "=" only, so values that themselves
            # contain "=" survive (the old unbounded split raised
            # ValueError on e.g. "query=a=b").
            (key, value)=arg.split("=", 1)
            args[key]=value
    else:
        args=None

    main(args)
Note: See TracBrowser for help on using the repository browser.