auf_savoirs_en_partage/savoirs/lib/recherche.py
# -*- encoding: utf-8 -*-
import urllib, httplib, time, simplejson, pprint, math, re
from django.core.urlresolvers import reverse
from django.conf import settings
from auf_savoirs_en_partage.backend_config import RESOURCES
from sep import SEP
from utils import smart_str
from savoirs.globals import configuration

def google_search (page, q):
    """Query the Google AJAX Search API and return one page of web
    results for the query 'q'."""
    data = {'results': [], 'last_page': 0, 'more_link': ''}
    params = {'q': q,
              'rsz': 'large',
              'v': '1.0',
              'start': page * configuration['resultats_par_page'],
              }

    # 'cref' is the URL of the custom search engine definition; a
    # timestamp is interpolated into it.
    params['cref'] = configuration['google_xml'] % time.time()
    url = "/ajax/services/search/web?" + urllib.urlencode (params)

    handle = httplib.HTTPConnection ('ajax.googleapis.com')
    handle.request ("GET", url)
    r = handle.getresponse ()
    response = simplejson.loads(r.read ())
    #print pprint.pformat (params)
    #print pprint.pformat (response)
    handle.close ()

    if len (response['responseData']['results']) > 0:
        # The cursor lists the available result pages; keep the highest
        # zero-based page number.
        for i in response['responseData']['cursor']['pages']:
            p = int (i['label']) - 1
            if p > data['last_page']:
                data['last_page'] = p

        for res in response['responseData']['results']:
            data['results'].append ({'uri': res['url'],
                                     'content': res['content'],
                                     'title': res['title']})

        data['more_link'] = response['responseData']['cursor']['moreResultsUrl']
    return data

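# Example (sketch): the slice of the Google response that google_search()
# actually reads. The values below are made up; the keys are the ones the
# parsing code above relies on.
def _exemple_reponse_google ():
    response = {'responseData': {
        'results': [{'url': 'http://example.org/',
                     'title': 'Exemple',
                     'content': 'Un extrait...'}],
        'cursor': {'pages': [{'label': '1'}, {'label': '2'}],
                   'moreResultsUrl': 'http://www.google.com/...'}}}
    pages = [int (p['label']) - 1 for p in response['responseData']['cursor']['pages']]
    return max (pages)  # -> 1, the zero-based number of the last page
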
def sep_build_content (regexp, description):
    """Build a highlighted excerpt of 'description', roughly 'maxlen'
    characters long, centered on the first match of 'regexp'."""
    maxlen = 200
    content = description
    if len (description) > maxlen:
        # Center the excerpt on the first match, if any.
        start = 0
        loc = regexp.search (description)
        if loc:
            start = loc.start ()

        f = start - (maxlen / 2)
        t = 0
        if f < 0:
            t = -f
            f = 0
        t += start + (maxlen / 2)
        if f > 0:
            # Snap the left edge to the start of a sentence.
            while description[f] != '.' and f > 0:
                f -= 1
            if f > 0:
                f += 1
        if t < len (description):
            # Extend the right edge to the end of a sentence.
            while t < len (description) and description[t] != '.':
                t += 1
            t += 1
        content = description[f:t]
        if f > 0:
            content = "(...) " + content
        if t < (len (description) - 1):
            content = content + " (...)"
    content = regexp.sub (r'\1<b>\2</b>\3', content)
    return content

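# Example (sketch): on a long description, sep_build_content() returns a
# window around the first query word, bracketed by "(...)" where text was
# cut, with the word wrapped in <b> tags. The filler text is made up.
def _exemple_extrait ():
    regexp = make_regexp ({'q': u'eau'})
    desc = (u"Phrase de remplissage. " * 20) + u"L'accès à l'eau potable."
    return sep_build_content (regexp, desc)
    # -> u"(...) Phrase de remplissage. ... l'<b>eau</b> potable."
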
def make_regexp (q):
    """Compile a regexp that matches any of the significant words of the
    query 'q', capturing the characters just before and after the word."""
    words = []
    w = re.compile (r'\W+', re.U)
    for k in q.keys ():
        if k != 'operator':
            words.extend (w.split (smart_str(q[k]).decode("utf-8")))
    # Drop very short words, and put the longest words first so the
    # alternation prefers them.
    words = filter (lambda x: len(x)>2, words)
    words.sort (lambda x,y: len(y)-len(x))

    patt = "|".join (words)
    patt = r"(\W)(" + patt + r")(\W)"
    return re.compile (patt, re.I|re.U)

def hl (r, string):
    """Wrap the matches of regexp 'r' in <b> tags; pass None through."""
    if string is not None:
        return r.sub (r'\1<b>\2</b>\3', string)
    return None

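# Example (sketch): make_regexp() and hl() together wrap each query word
# in <b> tags when it appears as a whole word. The query dict mirrors the
# {'q': ...} shape that cherche() passes for a simple search.
def _exemple_surlignage ():
    r = make_regexp ({'q': u'santé'})
    return hl (r, u'la santé publique')
    # -> u'la <b>santé</b> publique'
# Note that the pattern requires a non-word character on both sides, so a
# query word at the very start or end of the string is not highlighted.
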
def sep_search (page, q, data):
    """Run the query 'q' against the SEP backend and append one page of
    highlighted results to data['results']."""
    f = page * configuration['resultats_par_page']
    t = f + configuration['resultats_par_page']
    s = SEP ()

    matches = s.search (q)
    data['last_page'] = math.ceil (float(len (matches)) /
                                   float(configuration['resultats_par_page'])) - 1
    records = s.get (matches[f:t])
    regexp = make_regexp (q)

    for r in records:
        uri = r.get ("uri", "")
        if len (uri) == 0:
            uri = r.get ("source")

        serveur = RESOURCES[r.get('server')]['url']

        # Keep the source only if it is not a URL
        source = r.get ("source", None)
        if source is not None and source.startswith ('http'):
            source = None

        title = r.get ("title", "")
        content = sep_build_content (regexp, r.get ("description", ""))

        contributeurs = r.get ('contributor')
        if contributeurs is not None:
            contributeurs = "; ".join (contributeurs)

        subject = r.get ('subject')
        if subject is not None:
            subject = ", ".join (subject)

        data['results'].append ({
            'uri': uri,
            'getServeurURL': serveur,
            'source': source,
            'id': r.get("id"),
            'title': hl(regexp, title),
            'content': content,
            'creator': '; '.join([hl(regexp, x) for x in r.get('creator', [])]),
            'contributors': hl(regexp, contributeurs),
            'subject': hl(regexp, subject),
            'modified': r.get('modified'),
            'isbn': r.get('isbn'),
            'admin_url': reverse('admin:savoirs_record_change', args=[r.get('id')])
        })

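# Example (sketch): the pagination arithmetic used above. With 8 results
# per page, a result set of 17 matches spans pages 0, 1 and 2, so
# last_page comes out as 2. The helper and its arguments are made up.
def _exemple_pagination (total=17, par_page=8):
    return math.ceil (float(total) / float(par_page)) - 1  # -> 2.0
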
def cherche (page, q, engin=None):
    """Dispatch the query 'q' to the configured search engine ('google',
    'sep' or 'avancee') and return one page of results."""
    rc = {'results': [], 'last_page': 0, 'more_link': ''}

    if engin is None:
        engin = configuration['engin_recherche']

    if engin == 'google':
        rc = google_search (page, q)

    elif engin == 'sep':
        # Simple search: wrap the free-text query in a one-field dict.
        sep_search (page, {'q': q}, rc)

    elif engin == 'avancee':
        # Advanced search: 'q' is already a dict of fields.
        sep_search (page, q, rc)

    return rc

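# Example (sketch): how cherche() is meant to be called. A simple search
# passes the raw query string; an advanced search passes a dict of fields
# ('q' and 'operator' appear in the code above; any other field names
# would be site-specific).
def _exemple_cherche ():
    simple = cherche (0, u"développement durable", engin='sep')
    avancee = cherche (0, {'q': u"eau", 'operator': 'and'}, engin='avancee')
    return simple, avancee
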
def build_search_regexp(query):
    """Build a regular expression that can be used to search for the
    given keywords in 'query'."""
    words = query.split()
    if not words:
        return None
    parts = []
    for word in words:
        part = re.escape(word.lower())
        # Regular expressions don't know about the uppercase versions
        # of accented characters. :(
        part = part.replace(u'à', u'[àÀ]')
        part = part.replace(u'â', u'[âÂ]')
        part = part.replace(u'é', u'[éÉ]')
        part = part.replace(u'ê', u'[êÊ]')
        part = part.replace(u'î', u'[îÎ]')
        part = part.replace(u'ç', u'[çÇ]')

        # Do this after handling the accented characters...
        part = part.replace('a', u'[aàâÀÂ]')
        part = part.replace('e', u'[eéèëêÉÊ]')
        part = part.replace('i', u'[iïîÎ]')
        part = part.replace('o', u'[oô]')
        part = part.replace('u', u'[uûüù]')
        part = part.replace('c', u'[cç]')

        parts.append(part)
    return re.compile('|'.join(parts), re.I)
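
# Example (sketch): the compiled pattern matches regardless of case or
# accents in the text being searched.
def _exemple_accents():
    r = build_search_regexp(u"ecole")
    return bool(r.search(u"École de médecine"))  # -> True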