# -*- coding: utf-8 -*-

import json
import os.path
import re

from feedgen.feed import FeedGenerator

# Input files: news entries as HTML fragments, release metadata as JSON.
DATA_DIR = 'data'
ENTRIES_FILE = os.path.join(DATA_DIR, 'entries.html')
RELEASES_FILE = os.path.join(DATA_DIR, 'releases.json')


def load_feed_metadata(fg):
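    """Set the feed-level Atom metadata: id, title, subtitle and links."""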
    fg.id('urn:uuid:60a76c80-d399-11d9-b91C-543213999af6')
    fg.title('I2P News')
    fg.subtitle('News feed, and router updates')
    fg.link(href='http://i2p-projekt.i2p/')
    fg.link(href='http://echelon.i2p/news/news.atom.xml', rel='self')
    fg.link(href='http://psi.i2p/news/news.atom.xml', rel='alternate')


def load_entries(fg, entries_file):
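    """Split entries_file into individual news entries and add each one
    to the feed.

    A sketch of the per-entry markup this parser assumes (illustrative,
    not copied from a real entries.html): metadata sits as key="value"
    pairs before </details>, the XHTML body follows it, and every entry
    ends with </article>:

        <article id="urn:uuid:..." title="..." href="http://..."
                 author="..." published="2015-06-06T00:00:00+00:00"
                 updated="2015-06-06T00:00:00+00:00">
        <details><summary>One-line summary</summary></details>
        <div>XHTML body ...</div>
        </article>
    """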
    with open(entries_file) as f:
        entries_data = f.read().strip('\n')
    entries = entries_data.split('</article>')
    # split() leaves an empty final element after the trailing </article>
    for entry_str in entries[:-1]:
        # Everything before </details> is metadata; the rest is the body.
        entry_parts = entry_str.split('</details>', 1)
        metadata = extract_entry_metadata(entry_parts[0])

        fe = fg.add_entry()
        fe.id(metadata['id'])
        fe.title(metadata['title'])
        fe.summary(metadata['summary'])
        fe.link(href=metadata['href'])
        fe.author(name=metadata['author'])
        fe.published(metadata['published'])
        fe.updated(metadata['updated'])
        fe.content(entry_parts[1], type='xhtml')


def extract_entry_metadata(s):
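    """Extract key="value" attribute pairs and the <summary> text from
    the markup preceding </details>, returning them as a dict."""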
    # Collect every key="value" (or bare key=value) pair, quotes stripped.
    m = {k: v.strip('"') for k, v in re.findall(r'(\S+)=(".*?"|\S+)', s)}
    m['summary'] = re.findall(r'<summary>(.*)</summary>', s)[0]
    return m


def load_releases(fg):
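    """Read RELEASES_FILE and describe each router release through the
    i2p feedgen extension.

    A sketch of the JSON structure the code below reads (field names are
    taken from the code; the values and the "su3" key are illustrative):

        [
          {
            "date": "2015-06-01",
            "version": "0.9.20",
            "minVersion": "0.9.9",
            "minJavaVersion": "1.6",
            "updates": {
              "su3": {
                "clearnet": ["http://example.com/i2pupdate.su3"],
                "clearnetssl": ["https://example.com/i2pupdate.su3"],
                "torrent": "magnet:?xt=urn:btih:...",
                "url": ["http://example.i2p/i2pupdate.su3"]
              }
            }
          }
        ]
    """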
    fg.load_extension('i2p')
    with open(RELEASES_FILE) as json_data:
        d = json.load(json_data)
    for release in d:
        r = fg.i2p.add_release()
        r.date(release['date'])
        r.version(release['version'])
        # minVersion and minJavaVersion are optional fields
        if 'minVersion' in release:
            r.min_version(release['minVersion'])
        if 'minJavaVersion' in release:
            r.min_java_version(release['minJavaVersion'])

        for update_type, update in release['updates'].items():
            u = r.add_update(update_type)
            # Every update source is optional; 'torrent' holds a single
            # link, the other keys hold lists of URLs.
            if 'clearnet' in update:
                for url in update['clearnet']:
                    u.clearnet(url)
            if 'clearnetssl' in update:
                for url in update['clearnetssl']:
                    u.clearnetssl(url)
            if 'torrent' in update:
                u.torrent(update['torrent'])
            if 'url' in update:
                for url in update['url']:
                    u.url(url)


if __name__ == '__main__':
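    # Assemble the feed (metadata, news entries, release info) and
    # write it out as pretty-printed Atom XML.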
    fg = FeedGenerator()
    fg.language('en')
    load_feed_metadata(fg)
    load_entries(fg, ENTRIES_FILE)
    load_releases(fg)
    fg.atom_file('news.atom.xml', pretty=True)