def sep_build_content(regexp, description):
    """Build an HTML excerpt of *description* centered on a regexp match.

    If the description is longer than 200 characters, a window of roughly
    200 characters around the first match of *regexp* is extracted,
    extended outward to the nearest sentence boundaries ('.') when
    possible, and decorated with "(...)" ellipsis markers on truncated
    ends.  Every match of *regexp* in the resulting text is wrapped in
    <b>...</b> tags.

    regexp      -- compiled regex; must contain one capture group, since
                   the highlight substitution uses the backreference \1.
    description -- plain-text description to excerpt and highlight.

    Returns the (possibly truncated) highlighted string.
    """
    maxlen = 200
    # Fallback: short descriptions (or no match) are returned whole.
    content = description
    if len(description) > maxlen:
        loc = regexp.search(description)
        # Guard: search() returns None when nothing matches; in that
        # case keep the full description rather than crashing on
        # loc.start().
        if loc:
            # Floor division keeps f/t integers so they are valid slice
            # indices on Python 3; identical result to int '/' on Python 2.
            f = loc.start() - (maxlen // 2)
            t = 0
            if f < 0:
                # Window underflows the start: shift the unused left
                # half onto the right side.
                t = -f
                f = 0
            t += loc.start() + (maxlen // 2)
            if f > 0:
                # Walk left to the previous sentence boundary.
                while description[f] != '.' and f > 0:
                    f -= 1
                if f > 0:
                    f += 1  # skip past the '.' itself
            if t < len(description):
                # Walk right to the next sentence boundary (bounds check
                # first, so we never index past the end).
                while t < len(description) and description[t] != '.':
                    t += 1
                t += 1  # include the '.' in the slice (end-exclusive)
            content = description[f:t]
            # Ellipsis markers for whichever ends were truncated.
            if f > 0:
                content = "(...) " + content
            if t < (len(description) - 1):
                content = content + " (...)"
    # Highlight every match in the excerpt.
    content = regexp.sub(r'<b>\1</b>', content)
    return content
# Pagination window into the SEP search results.
# NOTE(review): the offset uses configuration['resultats_par_page'] but the
# window is a hard-coded 8 items wide — confirm these are meant to differ
# (t = f + configuration['resultats_par_page'] would be the symmetric choice).
f = page * configuration['resultats_par_page']
t = f + 8
s = SEP ()
# The SEP search API takes the query string under the 'q' key
# (encoded to UTF-8 bytes before sending).
matches = s.search ({'q': q.encode ('utf-8')})
# Last page index, 0-based: ceil(total / per_page) - 1.
data['last_page'] = math.ceil (float (len (matches)) /
                               float (configuration['resultats_par_page'])) - 1
# NOTE(review): 'set' shadows the builtin; kept as-is because code outside
# this view may reference the result by this name.
set = s.get (matches[f:t])
uri = r.get ("source", "")
if len (uri) == 0:
uri = r.get ("uri")
- title = r.get ("title", "")
+ title = regexp.sub (r'<b>\1</b>', r.get ("title", ""))
content = sep_build_content (regexp, r.get ("description", ""))
data['results'].append ({'uri': uri, 'title': title, 'content': content})