#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dynamic export tool for w.c.s. data.

Copyright: Agence universitaire de la Francophonie — www.auf.org
Licence: GNU General Public Licence, version 2
Author: Jean Christophe André
Created: 13 March 2013

Depends: wcs, python-simplejson, python-magic

Supported URLs:
- /dynexport => list of forms for the current domain
- /dynexport/domains.json => list of available domains
- /dynexport/formulaire => list of the options below
- /dynexport/formulaire/fields.json
- /dynexport/formulaire/field-names.json
- /dynexport/formulaire/field-names.txt
- /dynexport/formulaire/data.json
- /dynexport/formulaire/last-run.log
- /dynexport/formulaire/liste-dossiers.json
- /dynexport/formulaire/clear-cache => empties the cache
- /dynexport/formulaire/data/nom-dossier.json
- /dynexport/formulaire/data/nom-dossier_attachement-1.xxx
- /dynexport/formulaire/data/nom-dossier_attachement-2.xxx
- /dynexport/formulaire/data/nom-dossier_attachement-…
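
Example with purely illustrative names (domain 'formulaires.example.auf.org',
form 'demande-de-bourse'):

    http://formulaires.example.auf.org/dynexport/demande-de-bourse/fields.json
    http://formulaires.example.auf.org/dynexport/demande-de-bourse/data/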
"""

import os
import sys
import time  # time, gmtime, strftime, strptime, struct_time
import logging
import unicodedata
import mimetypes
import magic
import simplejson as json
from re import match
from cStringIO import StringIO
from gzip import GzipFile

DELAIS = 5  # keep at most 5 seconds in cache
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'  # date format for HTTP headers

WCS_ROOT_DIR = '/var/lib/wcs'
WCS_DOMAIN_SUFFIX = '.auf.org'
WCS_CACHE_DIR = '/var/tmp'

#--------------------------------------------------------------------------
# global variables
#--------------------------------------------------------------------------

pub = None  # w.c.s. publisher, initialised on demand by set_wcs_publisher()

#--------------------------------------------------------------------------
# processing functions
#--------------------------------------------------------------------------

def http_redirect(location, code='302'):
    headers = {}
    headers['Content-Type'] = 'text/plain; charset=utf-8'
    headers['Status'] = '302 Redirection'
    headers['Location'] = location
    data = """If you see this, it means the automatic redirection has failed.
Please go to ${location}"""
    data = data.replace('${location}', location)
    headers = ''.join(map(lambda x: "%s: %s\r\n" % (x, headers[x]), headers))
    f = open('/dev/stdout', 'wb')
    f.write(headers + "\r\n")
    f.write(data)
    f.close()
    sys.exit(0)

def http_reply_and_exit(data, mime_type='text/html', charset='utf-8'):
    current_time = time.time()
    mtime = time.gmtime(current_time)
    etime = time.gmtime(current_time + DELAIS)
    itime = None
    if 'HTTP_IF_MODIFIED_SINCE' in os.environ:
        try:
            itime = time.strptime(os.environ['HTTP_IF_MODIFIED_SINCE'],
                                  TIME_FORMAT)
        except ValueError:
            itime = None
    # prepare the headers and the data
    headers = {}
    headers['Content-Type'] = '%s; charset=%s' % (mime_type, charset)
    headers['Last-Modified'] = time.strftime(TIME_FORMAT, mtime)
    headers['Expires'] = time.strftime(TIME_FORMAT, etime)
    if os.environ['REQUEST_METHOD'] == 'GET' and (not itime or mtime > itime):
        # pick the requested version (compressed or not)
        if os.environ.get('HTTP_ACCEPT_ENCODING', '').split(',').count('gzip') > 0:
            zdata = StringIO()
            GzipFile('', 'w', 9, zdata).write(data)
            data = zdata.getvalue()
            headers['Content-Encoding'] = 'gzip'
            headers['Vary'] = 'Content-Encoding'
        headers['Content-Length'] = len(data)
    else:
        headers['Status'] = '304 Not Modified'
        data = ''
    # send the reply
    headers = ''.join(map(lambda x: "%s: %s\r\n" % (x, headers[x]), headers))
    f = open('/dev/stdout', 'wb')
    f.write(headers + "\r\n")
    f.write(data)
    f.close()
    # stop processing here
    sys.exit(0)

def _reduce_to_alnum(s, replacement_char='-'):
    """Reduce a character string to plain ASCII alphanumerics."""
    if type(s) is not unicode:
        s = unicode(s, 'utf-8')
    s = unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore')
    r = ''
    for c in s:
        if ('a' <= c.lower() <= 'z') or ('0' <= c <= '9'):
            r += c
        elif len(r) > 0 and r[-1] != replacement_char:
            r += replacement_char
        else:  # r == '' or r[-1] == replacement_char
            pass  # avoid repeating the replacement character
    return r.strip(replacement_char)
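
# Illustrative example: _reduce_to_alnum(u'Jean-Christophe André', '_')
# returns 'Jean_Christophe_Andre'.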

def _make_wcs_cache_name(domain, form, name):
    return 'wcs-%s-%s-%s' % (domain, form, name)

def set_wcs_cache(domain, form, name, data):
    """Store an already-serialised piece of data in the cache directory."""
    cache_filename = _make_wcs_cache_name(domain, form, name)
    f = open(os.path.join(WCS_CACHE_DIR, cache_filename), 'wb')
    f.write(data)
    f.close()

def get_wcs_cache(domain, form, name):
    """Return cached data, or None when the cache entry does not exist."""
    cache_filename = _make_wcs_cache_name(domain, form, name)
    cache_filename = os.path.join(WCS_CACHE_DIR, cache_filename)
    if os.path.exists(cache_filename):
        f = open(cache_filename, 'rb')
        data = f.read()
        f.close()
        return data
    return None

def clear_wcs_cache(domain, form):
    cache_filename = _make_wcs_cache_name(domain, form, '')
    for f in os.listdir(WCS_CACHE_DIR):
        if f.startswith(cache_filename):
            os.unlink(os.path.join(WCS_CACHE_DIR, f))

def set_wcs_publisher(domain):
    """Initialise the global w.c.s. publisher for the requested domain."""
    global pub
    from wcs import publisher
    pub = publisher.WcsPublisher.create_publisher()
    pub.app_dir = os.path.join(pub.app_dir, domain)

def get_wcs_domains():
    suffix = WCS_DOMAIN_SUFFIX
    root = WCS_ROOT_DIR
    l = os.listdir(root)
    return [x for x in l if os.path.isdir(os.path.join(root, x)) and x.endswith(suffix)]

def get_wcs_forms(domain):
    set_wcs_publisher(domain)
    from wcs.formdef import FormDef
    return [f.url_name for i, f in FormDef.items()]

def get_wcs_form_data(domain, form):
    """Extract the form's data (and fill the cache as a side effect)."""
    data = get_wcs_cache(domain, form, 'metadata.json')
    if data is not None:
        return json.loads(data, encoding='utf-8')
    # dictionary of the metadata (which will be cached at the end)
    metadata = {}

    # set up the extraction log
    logname = _make_wcs_cache_name(domain, form, 'last-run.log')
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename=os.path.join(WCS_CACHE_DIR, logname),
                        filemode='w')
    logging.info('Début.')

    set_wcs_publisher(domain)
    from wcs.formdef import FormDef
    from wcs.fields import TitleField, CommentField, TextField, \
        StringField, ItemField, ItemsField, EmailField, \
        DateField, FileField, BoolField, TableField
    formdef = FormDef.get_by_urlname(form)

    # give every field a unique name
    fields = {}
    field_names = {}
    field_names_duplicates = {}
    for i, field in enumerate(formdef.fields):
        if isinstance(field, TitleField) or isinstance(field, CommentField):
            continue  # layout-only fields carry no data
        name = _reduce_to_alnum(field.label, '_').lower()
        if name in field_names.values():  # duplicate
            field_names_duplicates[name] = field_names_duplicates.get(name, 1) + 1
            name = '%s_%d' % (name, field_names_duplicates[name])
        field_names.update({field.id: name})
        fields.update({field.id: {'index': i, 'name': field_names[field.id], 'label': field.label, 'varname': field.varname and field.varname or ''}})
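
    # e.g. three distinct fields all labelled « Nom » would be exported as
    # 'nom', 'nom_2' and 'nom_3' (illustrative example of the deduplication)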

    data = json.dumps(fields, ensure_ascii=False).encode('utf-8')
    set_wcs_cache(domain, form, 'fields.json', data)
    metadata.update({'fields': fields})

    # load the MIME-type database once and for all
    #magicmime = magic.Magic(mime=True) => for later…
    magicmime = magic.open(magic.MAGIC_MIME)
    magicmime.load()

    liste_dossiers = []
    liste_attachements = {}
    for object in formdef.data_class().select():
        if object.user is None:
            logging.warning("Dossier '%s' sans utilisateur associé ?!?"
                            " On ignore...", object.id)
            continue
        try:
            workflow_status = object.status.startswith('wf-') and \
                object.get_workflow_status().name or None
        except Exception:
            workflow_status = None

        qfiles = {}  # attachments of this dossier, indexed by field name
        result = {
            'num_dossier': object.id,
            'wcs_status': object.status,
            'wcs_workflow_status': workflow_status,
            'wcs_user_email': object.user.email,
            'wcs_user_display_name': object.user.display_name,
            #'wcs_last_modified': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(object.last_modified())),
            'wcs_comments': [],
        }

        # keep the workflow comments (with author and timestamp)
        if object.evolution is not None:
            for e in object.evolution:
                if e.comment is not None:
                    who = pub.user_class.get(e.who).display_name
                    e_time = time.strftime('%Y-%m-%d %H:%M:%S', e.time)
                    comment = '%s -- %s %s' % (e.comment, who, e_time)
                    result['wcs_comments'].append(comment)

        # extract the value of every data field
        for field in formdef.fields:
            field_id = str(field.id)
            if field_id not in object.data:
                continue
            if isinstance(field, TitleField) or isinstance(field, CommentField):
                continue
            field_name = fields[field_id]['name']
            data = object.data.get(field_id)
            if data is None:
                result[field_name] = None
                continue
            if isinstance(field, StringField) or isinstance(field, TextField) \
                    or isinstance(field, EmailField) or isinstance(field, ItemField):
                result[field_name] = data
            elif isinstance(field, ItemsField) or isinstance(field, TableField):
                result[field_name] = data  # list => could perhaps be joined with ';'
            elif isinstance(field, BoolField):
                result[field_name] = (data == 'True')
            elif isinstance(field, DateField):
                if isinstance(data, time.struct_time):
                    result[field_name] = '%04d-%02d-%02d' % (data.tm_year,
                                                             data.tm_mon, data.tm_mday)
                else:
                    result[field_name] = data
            elif isinstance(field, FileField):
                # work out the attachment's extension
                if '.' in data.orig_filename:
                    extension = data.orig_filename.rpartition('.')[2].lower()
                else:  # the file name has no extension, ask libmagic
                    p = os.path.join(pub.app_dir, 'uploads', data.qfilename)
                    #m = magicmime.from_file(p) => for later…
                    m = magicmime.file(p).split()[0].strip(';')
                    extension = mimetypes.guess_extension(m)
                    if extension is None:
                        logging.warning("Type de fichier inconnu pour '%s'.", p)
                    if extension is not None:
                        extension = extension[1:]  # drop the leading dot
                    else:
                        extension = 'unknown'
                result[field_name] = "%s.%s" % (field_name, extension)
                qfiles[field_name] = data.qfilename
            else:
                logging.warning("Type de champ inconnu '%s' pour '%s' (%s).",
                                field.__class__.__name__, field_name, field.label)

        # build the dossier's export file name
        num_dossier = result['num_dossier']
        nom = _reduce_to_alnum(result.get('nom', 'sans-nom')).upper()
        prenom = _reduce_to_alnum(result.get('prenom', 'sans-prenom')).upper()
        adel = result.get('adresse_electronique', 'sans-adel').replace('@', '-').lower()
        filename = "%04d-%s-%s-%s" % (num_dossier, nom, prenom, adel)
        liste_dossiers.append(filename + '.json')
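
        # e.g. (purely illustrative values) dossier 42 submitted by
        # « Marie DUPONT » with the address marie.dupont@example.org
        # would be exported as '0042-DUPONT-MARIE-marie.dupont-example.org.json'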

        # record the paths of the attached files
        for f in qfiles:
            dst = filename + '_' + result[f]
            src = os.path.join(pub.app_dir, 'uploads', qfiles[f])
            liste_attachements.update({dst: src})
            # rename the attachment referenced in the dossier accordingly
            result[f] = dst

        # generate the dossier's JSON file
        data = json.dumps(result, ensure_ascii=False).encode('utf-8')
        set_wcs_cache(domain, form, 'data_%s.json' % filename, data)

        logging.info("Dossier '%s' : %s.",
                     filename, result['wcs_workflow_status'])

    data = json.dumps(liste_attachements, ensure_ascii=False).encode('utf-8')
    set_wcs_cache(domain, form, 'data-files.json', data)
    metadata.update({'attachements': liste_attachements})

    liste_dossiers.sort()
    data = json.dumps(liste_dossiers, ensure_ascii=False).encode('utf-8')
    set_wcs_cache(domain, form, 'liste-dossiers.json', data)
    metadata.update({'dossiers': liste_dossiers})

    # cache the consolidated metadata and return it
    data = json.dumps(metadata, ensure_ascii=False).encode('utf-8')
    set_wcs_cache(domain, form, 'metadata.json', data)
    return metadata

#if __name__ == '__main__':
#    try:
#        extract_data(formdef, OUTPUT_DIRECTORY)
#    except:
#        logging.exception("Interruption du traitement pour cause d'erreur !")

#--------------------------------------------------------------------------
# web request handling
#--------------------------------------------------------------------------

#for k in sorted(os.environ):
#    l.append('%s=%s\n' % (k, os.environ[k]))
#http_reply_and_exit(data, 'text/plain')

domain = os.environ.get('HTTP_HOST', '')
if domain not in get_wcs_domains():
    http_reply_and_exit("Domaine '%s' inconnu." % domain, 'text/plain')

path_info = os.environ.get('PATH_INFO', '')

path_prefix = os.environ.get('REQUEST_URI', '')
if len(path_info) > 0:
    path_prefix = path_prefix[:-len(path_info)]

if path_info == '':
    http_redirect(path_prefix + '/')

if path_info == '/':
    # list of the available forms
    l = sorted(get_wcs_forms(domain))
    l = ['<li><a href="%s/">%s</a></li>' % (f, f) for f in l]
    title = '<p>Liste des formulaires disponibles :</p>\n'
    data = '<html>\n' + title + '<ul>\n' + '\n'.join(l) + '\n</ul>\n</html>'
    http_reply_and_exit(data, 'text/html')

if path_info == '/index.json':
    # list of the available forms
    l = sorted(get_wcs_forms(domain))
    data = json.dumps(l, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if path_info == '/domains.json':
    # list of the available domains
    l = get_wcs_domains()
    data = json.dumps(l, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+$', path_info):
    http_redirect(path_prefix + path_info + '/')

if match(r'^/[a-z0-9-]+/$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    l = ['fields.json', 'field-names.json', 'field-names.txt', 'last-run.log', 'liste-dossiers.json']
    l = ['<li><a href="%s">%s</a></li>' % (f, f) for f in l]
    title = '<p>Liste des informations disponibles :</p>\n'
    action1 = """<p><a href="data/">Export des données</a></p>\n"""
    action2 = """<p><a href="clear-cache">Suppression du cache</a> (pour ré-export)</p>\n"""
    data = '<html>\n' + title + '<ul>\n' + '\n'.join(l) + '\n</ul>\n' + action1 + action2 + '</html>'
    http_reply_and_exit(data, 'text/html')

if match(r'^/[a-z0-9-]+/index.json$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    l = ['fields.json', 'field-names.json', 'field-names.txt', 'last-run.log', 'liste-dossiers.json', 'data', 'clear-cache']
    data = json.dumps(l, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+/clear-cache$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    clear_wcs_cache(domain, form)
    http_reply_and_exit('Ok.', 'text/plain')

if match(r'^/[a-z0-9-]+/fields.json$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    d = json.loads(get_wcs_cache(domain, form, 'fields.json'), encoding='utf-8')
    data = json.dumps(d, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+/field-names.json$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    d = json.loads(get_wcs_cache(domain, form, 'fields.json'), encoding='utf-8')
    d = dict([(k, d[k]['name']) for k in d])
    data = json.dumps(d, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+/field-names.txt$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    d = json.loads(get_wcs_cache(domain, form, 'fields.json'), encoding='utf-8')
    d = [(k, d[k]['name'], d[k]['label']) for k in d]
    d = sorted(d, key=lambda x: int(x[0]))
    text = u''.join([u'%s:%s:%s\n' % (x[0], x[1], x[2]) for x in d])
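    # each output line has the form "<field id>:<short name>:<label>", e.g.
    # "3:adresse_electronique:Adresse électronique" (illustrative values)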
    data = text.encode('utf-8')
    http_reply_and_exit(data, 'text/plain')

if match(r'^/[a-z0-9-]+/last-run.log$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    data = get_wcs_cache(domain, form, 'last-run.log')
    http_reply_and_exit(data, 'text/plain')

if match(r'^/[a-z0-9-]+/liste-dossiers.json$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    data = json.loads(get_wcs_cache(domain, form, 'liste-dossiers.json'), encoding='utf-8')
    data = json.dumps(data, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+/data$', path_info):
    http_redirect(path_prefix + path_info + '/')

if match(r'^/[a-z0-9-]+/data/$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    dossiers = json.loads(get_wcs_cache(domain, form, 'liste-dossiers.json'), encoding='utf-8')
    attachements = json.loads(get_wcs_cache(domain, form, 'data-files.json'), encoding='utf-8')
    l = sorted(dossiers + attachements.keys())
    if len(l) > 0:
        l = ['<li><a href="%s">%s</a></li>' % (f, f) for f in l]
        title = '<p>Liste des documents disponibles :</p>\n'
        data = '<html>\n' + title + '<ul>\n' + '\n'.join(l) + '\n</ul>\n</html>'
    else:
        data = '<html>\n<p>Aucun document disponible.</p>\n</html>'
    http_reply_and_exit(data, 'text/html')

if match(r'^/[a-z0-9-]+/data/index.json$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    dossiers = json.loads(get_wcs_cache(domain, form, 'liste-dossiers.json'), encoding='utf-8')
    attachements = json.loads(get_wcs_cache(domain, form, 'data-files.json'), encoding='utf-8')
    l = sorted(dossiers + attachements.keys())
    data = json.dumps(l, ensure_ascii=False, indent=' ').encode('utf-8')
    http_reply_and_exit(data, 'application/json')

if match(r'^/[a-z0-9-]+/data/[^/]+$', path_info):
    form = path_info.split('/')[1]
    if form not in get_wcs_forms(domain):
        http_reply_and_exit("Formulaire '%s' inconnu." % form, 'text/plain')
    get_wcs_form_data(domain, form)
    doc = path_info.split('/')[3]
    dossiers = json.loads(get_wcs_cache(domain, form, 'liste-dossiers.json'), encoding='utf-8')
    if doc in dossiers:
        data = get_wcs_cache(domain, form, 'data_' + doc)
        data = json.loads(data, encoding='utf-8')
        data = json.dumps(data, ensure_ascii=False, indent=' ').encode('utf-8')
        http_reply_and_exit(data, 'application/json')
    attachements = json.loads(get_wcs_cache(domain, form, 'data-files.json'), encoding='utf-8')
    if doc in attachements:
        data = open(attachements[doc], 'rb').read()
        mime_type = mimetypes.guess_type(doc)[0]
        if mime_type is None:
            mime_type = 'application/octet-stream'
        http_reply_and_exit(data, mime_type)
    http_reply_and_exit("Document '%s' inconnu." % path_info, 'text/plain')

http_reply_and_exit("Requête '%s' inconnue." % path_info, 'text/plain')