User:Capturebot2/capturebot2.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import wikipedia, config, pagegenerators, query, upload
from datetime import datetime, timedelta, date
import time, re, os
import locale

sleeptime = 60 # seconds between recent-changes polls

sleeping = False  # when True, skip watched-page captures (console commands still work)
checking = True   # when False, capture every changed page instead of only watched ones

# last seen revision id for each watched page, plus the 'console' and
# 'options' control pages
lastrevs = {
           'console'  : 0,
           'options'  : 0,
           }

# since javascript is a security risk, this script is hardcoded now
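# It hides or moves aside the MediaWiki chrome (sidebar, logo, personal bar,
# page tabs) so that the capture shows only the content area.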
script = "if (el = document.getElementById('content')) { el.style.marginLeft = '0px';el.style.zIndex = '100'; } if (el = document.getElementById('footer')) { el.style.marginLeft = '0px';el.style.zIndex = '100'; } if (el = document.getElementById('column-one')) { el.style.position = 'relative';el.style.left = '-200px';el.style.top = '-1000px';el.style.width = '0';el.style.height = '0'; } if (el = document.getElementById('p-logo')) { el.style.display = 'none'; } if (el = document.getElementById('p-cactions')) { el.style.left = '200px';el.style.top = '1016px';el.style.zIndex = '102'; } if (el = document.getElementById('p-personal')) { el.style.display = 'none'; }"
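# script2 is appended when a capture URL contains a #fragment: it locates the
# heading whose anchor matches the fragment and removes every node outside
# that section (up to the next heading of the same or higher level).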
script2 = '''toc = document.getElementById("toc");
if (toc) toc.style.display = "";

spans = getElementsByClassName(document.getElementById("bodyContent"),"span","mw-headline"); 
section = location.href.match(/#([^&?]*)/);
if (section && section.length > 1) section = section[1];
i = 0;
while (i < spans.length && spans[i].parentNode.previousSibling.id != section) ++i;

if (i < spans.length) {
  headinglevel = parseInt(spans[i].parentNode.tagName.substring(1));
  start0 = document.getElementById("bodyContent").firstChild;
  cur = start0;
  hiding = true;
  key1 = spans[i].parentNode.previousSibling;
  j = i+1;
  key2 = null;
  while (j < spans.length) {
    headinglevel2 = parseInt(spans[j].parentNode.tagName.substring(1));
    if (headinglevel2 <= headinglevel) {
      key2 = spans[j].parentNode.previousSibling;
      break;
    }
    j++;
  }
  toremove = new Array();
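  // Collect nodes during the walk and delete them afterwards; removing a
  // node mid-walk would detach it and cut off the nextSibling chain.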
  do { 
    if (key1 == cur) { 
      hiding = false; 
    } else if (key2 != null && key2 == cur) {
      hiding = true; 
    }
    if (hiding) {
      toremove.push(cur);
    }
  } while ((cur = cur.nextSibling) != null);
  for (var i=0; i<toremove.length; ++i) {
    toremove[i].parentNode.removeChild(toremove[i]);
  }
}'''

def recentChanges(site = None, delay=0, block=70):
    '''
    Return a page generator of all the pages edited in a certain timespan.
    delay is how many minutes ago the timespan ends; block is its length in
    minutes. Should probably be moved somewhere more general.
    '''
    
    result = []
    dateformat = "%Y-%m-%dT%H:%M:%SZ"
    rcstart = datetime.utcnow() + timedelta(minutes=-delay-block)
    rcend = datetime.utcnow() + timedelta(minutes=-delay)

    params = {
        'action'    :'query',
        'list'      :'recentchanges',
        'rcstart'   :rcstart.strftime(dateformat),
        'rcend'     :rcend.strftime(dateformat),
        'rcdir'     :'newer',
        'rcprop'    :'title',
        'rcshow'    :'!bot',
        'rclimit'   :'5000',
        'rctype'    :'edit|new',
        }

    data = query.GetData(params, site, useAPI = True, encodeTitle = False)
    try:
        for item in data['query']['recentchanges']:
            result.append(item['title'])
    except (IndexError, KeyError):
        wikipedia.output(u'API Error, nothing found in the APIs')

    return pagegenerators.PagesFromTitlesGenerator(result, site)
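
# A minimal use of recentChanges (hypothetical):
#   for page in recentChanges(site, delay=0, block=70):
#       wikipedia.output(page.title())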

def execConsole(pagetext, site):
    '''Process commands on the console page; return (modified, newtext, summary).'''
    modified = False
    output = ''
    summary = 'By your command'
    lines = pagetext.splitlines()
    global sleeping
    for line in lines:
        if line != '':
            if line[0] != '*':
                modified = True
                output += '*' + line + '\n'
                if re.match(r'\s*status\s*', line, re.I):
                    if not sleeping:
                        output += '**Online' + '\n'
                    else:
                        output += '**Sleeping' + '\n'
                elif re.match(r'\s*sleep\s*', line, re.I):
                    if not sleeping:
                        output += '**Deactivating' + '\n'
                        sleeping = True
                    else:
                        output += '**Already inactive' + '\n'
                elif re.match(r'\s*wake\s*', line, re.I):
                    if sleeping:
                        output += '**Activating' + '\n'
                        sleeping = False
                    else:
                        output += '**Already active' + '\n'
                else:
                    m = re.match(r'\s*capture\s*(.*)', line, re.I)
                    if m is not None:
                        output += '**Analyzing ' + m.group(1) + '\n'
                        cpage = wikipedia.Page(site, m.group(1))
                        if cpage.exists():
                            check(site.getUrl(site.get_address(m.group(1))), site)
                            output += '**Done' + '\n'
                        else:
                            output += '**Error: Page does not exist' + '\n'
                    else:
                        output += '**Unknown command' + '\n'
                        summary = 'Error'
            else:
                output += line + '\n'
    return modified, output, summary
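
# Example console exchange (hypothetical): a user adds the line
# "capture Main Page" to the console page and the bot rewrites it as
#   *capture Main Page
#   **Analyzing Main Page
#   **Done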

def execOptions(pagetext):
    '''Parse the configuration page and return (modified, newtext, summary).'''
    summary = 'Updating configuration'
    output = ''
    modified = False
    lines = pagetext.splitlines()
    newlr = {}
    for line in lines:
        if line != '':
            if line[0] != '*':
                valid = False
                m = re.match(r'\s*watch\s*=\s*(.*)', line, re.I)
                if m is not None:
                    valid = True
                    output += line + '\n'
                    items = m.group(1).split('&')
                    for item in items:
                        item = item.strip()
                        newlr[item] = 0
                m = re.match(r'\s*script\s*=\s*(.*)', line, re.I)
                if m is not None:
                    valid = False
                    output += '*This option is a security risk, and has been disabled. You will have to modify the script variable in capturebot2.py instead.\n'
                    # security risk
                    # global script
                    # script = m.group(1)
                m = re.match(r'\s*match\s*=\s*(.*)', line, re.I)
                if m is not None:
                    valid = True
                    global checking
                    if m.group(1) == 'true':
                        output += line + '\n'
                        checking = True
                    elif m.group(1) == 'false':
                        output += line + '\n'
                        checking = False
                    else:
                        valid = False
                if not valid:
                    modified = True
                    output += '*' + line + '\n'
                    output += '**Unknown command' + '\n'
                    summary = 'Error'
            else:
                output += line + '\n'
    global lastrevs
    # carry over the last seen revisions of pages that stayed on the watch list
    for key in newlr:
        if key in lastrevs:
            newlr[key] = lastrevs[key]
    newlr['console'] = lastrevs['console']
    newlr['options'] = lastrevs['options']
    lastrevs = newlr
    return modified, output, summary
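
# Example configuration page (values hypothetical):
#   watch = What is going on at CP? & What is going on in the world?
#   match = true
# 'watch' takes '&'-separated page titles to monitor; 'match' toggles whether
# only watched pages (true) or every changed page (false) gets captured.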

def unescape(s):
    '''Replace the basic XML character entities with their literal characters.'''
    s = s.replace("&lt;", "<")
    s = s.replace("&gt;", ">")
    s = s.replace("&quot;", '"')
    s = s.replace("&apos;", "'")
    s = s.replace("&amp;", "&")
    return s

def check(pagetext, site):
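    # The regexp appears to target the wiki's capture markup: an external link
    # immediately followed by <span class="wigocapture"> wrapping a redlink to
    # a not-yet-uploaded File:<name>.png; group(1) is the linked URL and
    # group(2) the image name without extension.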
    regexp = re.compile('<a[^>]*\s+href="([^"]*)"[^>]*>(?:[^<]|<[^/]|</[^a]|</a[^>])*</a><span\s*class="wigocapture"><sup><a\s*href="[^"]*"\s*class="new"\s*title="(?:Image:|File:)([^&"]*).png', re.UNICODE)
    imglist = {}
    for m in regexp.finditer(pagetext):
        # wikipedia.output(m.group(2) + ' : ' + m.group(1))
        imglist[unescape(m.group(2))] = unescape(m.group(1))
    if len(imglist) == 0:
        return
    f = open('capturebotuploads','w')
    usesectionscript = False
    for img, link in imglist.iteritems():
        if link.find('&diff=') >= 0:
            link += '&diffonly=1' # capture just the diff, not the rendered page below it
        if link.find('#') >= 0:
            usesectionscript = True
        f.write('\'' + img.encode('utf8').encode('string-escape') + '\'' + ' : ' + '\'' + link.encode('utf8').encode('string-escape') + '\'' + '\n')
    f.close()
    f = open('capturebotscript','w')
    global script
    f.write(script)
    if usesectionscript:
        f.write("\n")
        f.write(script2)
    f.close()    
    wikipedia.output('capturing')
    # remove -x to use existing X server or when running on Windows
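    # Assumed meanings of the flags of this local webkit2png.py: -l <file> =
    # list of name : url pairs to capture, --geometry 1024 1 = viewport size,
    # -x = run under a virtual X server, -t 90 = timeout in seconds,
    # --script <file> = javascript to inject before the capture.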
    os.system("./webkit2png.py -l capturebotuploads --geometry 1024 1 -x -t 90 --script capturebotscript")
    os.remove('capturebotuploads')
    os.remove('capturebotscript')
    wikipedia.output('uploading')
    for img, link in imglist.iteritems():
        imgname = img + '.png'
        if os.path.exists(imgname):
            wikipedia.output('uploading ' + imgname + ' for ' + link)
            if link.find('conservapedia.com') != -1:
                desc = link + '\n{{CP screenshot}}\n[[Category:' + date.today().strftime('%B %Y') + ' Conservapedia screencaps]]'
                # Check for the existence of monthly category and create it
                catpage = wikipedia.Page(site,'Category:' + date.today().strftime('%B %Y') + ' Conservapedia screencaps')
                if not catpage.exists():
                    catpage.put('[[Category:Conservapedia screencaps]]', comment='Creating monthly category', minorEdit=False )
            else:
                desc = link + '\n{{subst:Nolicense/subst}}\n[[Category:Screencaps]]'
            bot = upload.UploadRobot(url = imgname, description = desc, targetSite = site, verifyDescription = False, keepFilename = True, ignoreWarning = True)
            try:
                targetFilename = bot.run()
            except IOError:
                wikipedia.output('upload failed!')
                page = wikipedia.Page(site,'File:' + imgname)
                try:
                    page.put(link+'\n[[Category:Failed screencaps]]','upload failed')
                except wikipedia.PageNotSaved:
                    wikipedia.output("Saving page failed")
            os.remove(imgname)
            wikipedia.output(imgname)
        else:
            wikipedia.output(imgname + ' failed')
    return

def main():
    # pywikipedia apparently sets the locale to the user's locale, which would
    # affect the month names used in category titles and could cause exceptions
    # with foreign characters; reset it to C (i.e. English)
    locale.setlocale(locale.LC_ALL, 'C')
    
    site = wikipedia.getSite('en','rationalwiki')
#    site = wikipedia.getSite('en','testwiki')
    site.forceLogin()
    username = site.loggedInAs()
    consolepage = 'User:'+username+'/console'
    optionspage = 'User:'+username+'/configuration'
    
    
    global lastrevs
    
    # process the options and console pages    
    page = wikipedia.Page(site,optionspage)
    try:
        lastrevs['options'] = page.latestRevision()
        r, t, s = execOptions(page.get(False,True))
        if r:
            try:
                page.put(t,s,False,False)
            except wikipedia.PageNotSaved:
                wikipedia.output("Saving page failed")
    except wikipedia.NoPage:
        # TODO: logging
        pass
    page = wikipedia.Page(site,consolepage)        
    try:
        lastrevs['console'] = page.latestRevision()
        r, t, s = execConsole(page.get(False,True),site)
        if r:
            try:
                page.put(t,s,False,False)
            except wikipedia.PageNotSaved:
                wikipedia.output("Saving page failed")
    except wikipedia.NoPage:
        # TODO: logging
        pass
        
    # now process all pages on the watch list
    for pagename in lastrevs.keys():
        if pagename in ('console', 'options'):
            continue # sentinel entries for the control pages, not watched pages
        page = wikipedia.Page(site, pagename)
        try:
            if page.latestRevision() > lastrevs[page.title()]:
                lastrevs[page.title()] = page.latestRevision()
                wikipedia.output('analyzing ' + page.title())
                check(site.getUrl(site.get_address(page.title())),site)
        except wikipedia.NoPage:
            # TODO: logging
            pass
        except KeyError:
            pass
    
    while True:
        generator = recentChanges(site)

        global sleeping
        global checking
        pregenerator = pagegenerators.PreloadingGenerator(generator)
        for page in pregenerator:
            if page.title() == consolepage:
                try:
                    if page.latestRevision() > lastrevs['console']:
                        lastrevs['console'] = page.latestRevision()
                        r, t, s = execConsole(page.get(False,True),site)
                        if r:
                            try:
                                page.put(t,s,False,False)
                            except wikipedia.PageNotSaved:
                                wikipedia.output("Saving page failed")
                except wikipedia.NoPage:
                    # TODO: logging
                    pass
            elif page.title() == optionspage:
                try:
                    if page.latestRevision() > lastrevs['options']:
                        lastrevs['options'] = page.latestRevision()
                        r, t, s = execOptions(page.get(False,True))
                        if r:
                            try:
                                page.put(t,s,False,False)
                            except wikipedia.PageNotSaved:
                                wikipedia.output("Saving page failed")
                except wikipedia.NoPage:
                    # TODO: logging
                    pass
            else:
                if not sleeping:
                    if checking:
                        if page.title() in lastrevs:
                            try:
                                if page.latestRevision() > lastrevs[page.title()]:
                                    lastrevs[page.title()] = page.latestRevision()
                                    if page.exists():
                                        wikipedia.output('analyzing ' + page.title())
                                        check(site.getUrl(site.get_address(page.title())),site)
                            except wikipedia.NoPage:
                                # TODO: logging
                                pass
                            except KeyError:
                                pass
                    else:
                        if page.exists():
                            wikipedia.output('analyzing ' + page.title())
                            check(site.getUrl(site.get_address(page.title())),site)
        wikipedia.output('sleeping')
        time.sleep(sleeptime)

if __name__ == '__main__':
    try:
        main()
    finally:
        wikipedia.stopme()
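
# To run (assuming a pywikipedia user-config.py configured with a bot account
# for the 'rationalwiki' family): python capturebot2.py
# The bot processes its console and configuration pages once, then polls
# recent changes every `sleeptime` seconds until stopped.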