User:Occasionaluse/mainpageright.py
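# Tally the external-link domains inserted into Template:Mainpageright on
# Conservapedia, overall and per user, by walking the template's revision
# history through the MediaWiki API. Python 2 (urllib.urlopen, print,
# dict.iteritems).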
#TODO Refactor with simplemediawiki and JSON?
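# One possible shape for that refactor (a sketch, assuming the third-party
# simplemediawiki package, whose MediaWiki(url).call(params) wrapper returns
# the JSON-decoded API response as a dict):
#   from simplemediawiki import MediaWiki
#   wiki = MediaWiki(api_url)
#   resp = wiki.call({'action': 'query', 'titles': 'Template:Mainpageright',
#                     'prop': 'revisions'})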
from lxml import etree
from urllib import urlopen
import pickle
import re
revision_id_file = "revisionid.pkl"
domain_counts_file = "domaincounts.pkl"
user_domain_counts_file = "userdomaincounts.pkl"
api_url = "http://conservapedia.com/api.php"
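# Prebuilt API queries: a revision's diff against its predecessor, the latest
# revision of Template:Mainpageright, and up to 500 revisions (oldest first)
# starting from a given revision id.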
get_diff = api_url + "?action=query&prop=revisions&rvdiffto=prev&format=xml&revids="
get_last_revision_id = api_url + "?action=query&titles=Template:Mainpageright&prop=revisions&format=xml"
get_revision_list = api_url + "?action=query&titles=Template:Mainpageright&prop=revisions&rvdir=newer&rvlimit=500&format=xml&rvstartid="
# group 3 captures the bare domain (hostname without any leading "www.")
domain_regex = r'^.*(http|https):\/\/(www\.)?([a-z0-9]+([\-\.]{1}[a-z0-9]+)*\.[a-z]{2,6}(:[0-9]{1,5})?)(\/.*).*$'
try:  # try to load the next revision id and the count dicts from a previous run
    revision_id = pickle.load(open(revision_id_file, "rb"))
    domain_counts = pickle.load(open(domain_counts_file, "rb"))
    user_domain_counts = pickle.load(open(user_domain_counts_file, "rb"))
except IOError:  # first run: start from the latest revision id with fresh count dicts
    revision_id = int(etree.parse(urlopen(get_last_revision_id)).find(".//rev").get("revid"))
    domain_counts = {}
    user_domain_counts = {}
#TODO support query-continue
for rev in etree.parse(urlopen(get_revision_list + str(revision_id))).findall(".//rev"):  # for each revision in the revision list
    user = rev.get("user")
    revision_id = int(rev.get("revid")) + 1  # resume point: just past the newest revision seen (rvdir=newer is oldest-first)
    #TODO distinguish moves from insertions
    diff_html = etree.parse(urlopen(get_diff + rev.get("revid"))).find(".//diff").text
    if not diff_html:  # skip revisions whose diff text is empty
        continue
    for ins in etree.HTML(diff_html).findall(".//ins"):  # for each insertion in the revision's diff
        try:
            domain_match = re.match(domain_regex, ins.text, re.I)
        except TypeError:  # ins.text is None when the <ins> node has no direct text
            continue
        if domain_match:
            domain = domain_match.group(3)
            try: domain_counts[domain] += 1
            except KeyError: domain_counts[domain] = 1
            try: user_domain_counts[(user, domain)] += 1
            except KeyError: user_domain_counts[(user, domain)] = 1
print "Total domain counts"
for domain, count in domain_counts.iteritems(): print domain + ": " + str(count)
print "\nDomain counts by user"
for user_domain, count in user_domain_counts.iteritems(): print str(user_domain) + ": " + str(count)
pickle.dump(revision_id, open(revision_id_file, "wb"))  # save the resume point so we don't repeat revisions
pickle.dump(domain_counts, open(domain_counts_file, "wb"))
pickle.dump(user_domain_counts, open(user_domain_counts_file, "wb"))
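# Delete the three .pkl files to reset the counts; the next run then re-scans
# starting from the template's current latest revision.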