User:CaptureBot4/source

From RationalWiki
Jump to navigation Jump to search
RedOpenSource.png
#!/usr/bin/env python3
# encoding: utf8
#
# Copyright © 2016 Martin Tournoij <martin@arp242.net>
# See below for full copyright
#
# Make sure that xvfb is running, or run this script with xvfb-run:
#   $ xvfb-run --server-args '-screen 0 1280x1024x16' python3.4 pwb.py capturebot4.py
#
# The resolution is important: the default of 640x480 will result in ugly fonts.
# 

import sys, os, re, subprocess, tempfile, urllib.parse, urllib.request, datetime

import pywikibot
from pywikibot.pagegenerators import RecentChangesPageGenerator
from bs4 import BeautifulSoup

import upload

_site = pywikibot.Site()


def recent_changes():
	''' Get all pages changed in the last 70 min '''
	gen = RecentChangesPageGenerator(
		site=_site,
		start=(datetime.datetime.utcnow() - datetime.timedelta(minutes=70)),
		reverse=True,         # Start with oldest
		showBot=False,        # Don't need bot changes
		showRedirects=False)  # Or redirects

	return gen


def find_uncaptured(page):
	""" Find uncaptured <capture> links on +page+ """

	soup = BeautifulSoup(_site.getUrl(_site.get_address(page.title())), 'html.parser')
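	# The selectors below assume the rendered WIGO markup looks roughly like
	# this (a hypothetical sketch, inferred from the selectors themselves):
	#
	#   <a href="http://example.com/">the link to capture</a>
	#   <span class="wigocapture"><sup>
	#     <a class="new" title="File:Example.png (page does not exist)">img</a>
	#   </sup></span>
	#
	# i.e. a red ("new") file link whose wigocapture <span> immediately
	# follows the external link we want to screenshot.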
	for link in soup.select('span.wigocapture > sup > a.new'):
		try:
			filename = re.sub('^File:', '', re.sub(r' \(page does not exist\)$', '', link.get('title')))
			url = link.find_parent('span', class_='wigocapture').find_previous_sibling('a').get('href')

			with tempfile.TemporaryDirectory() as tmp:
				tmp = os.path.join(tmp, 'f.png')
				print('    capturing {} -> {}'.format(url, tmp))
				try:
					capture_page(url, tmp)
				except Exception:
					print('ERROR', sys.exc_info()[1])
					continue

				print('    Uploading to {}'.format(filename))
				upload_file(tmp, filename, make_desc(url))
		# Make sure we never fail in the loop; updating pages is asynchronous,
		# and exiting unexpectedly will fuck things up.
		except Exception:
			print('ERROR', sys.exc_info()[1])


def make_desc(url):
	""" Make a description for the upload """
	purl = urllib.parse.urlparse(url)

	if 'conservapedia.com' in purl.netloc:
		cat = 'Category:{} Conservapedia screencaps'.format(datetime.date.today().strftime('%B %Y'))
		desc = '{{CP screenshot}}\n[[' + cat + ']]'

		# Check for the existence of monthly category and create it
		catpage = pywikibot.Page(_site, cat)
		if not catpage.exists():
			catpage.put('[[Category:Conservapedia screencaps]]', comment='Creating monthly category', minorEdit=False)
	elif 'citizendium.org' in purl.netloc:
		desc = '{{CZ screenshot}}\n[[Category:Citizendium screencaps]]'
	else:
		desc = '{{subst:Nolicense/subst}}\n[[Category:Screencaps]]'

	ar = archive_is(url)
	return 'Capture of {}\n\n{}\n{}'.format(url, ar, desc)
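
# For reference, the description for a Conservapedia capture comes out
# roughly like this (archive URL hypothetical; the category follows the
# current month):
#
#   Capture of http://www.conservapedia.com/Example
#
#   http://archive.is/AbCd
#   {{CP screenshot}}
#   [[Category:June 2016 Conservapedia screencaps]]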


def upload_file(path, dest, desc=''):
	""" Upload the file at +path+ to +title+ with the description +desc+ """

	upload.UploadRobot(
		targetSite=_site,
		ignoreWarning=True,       # Just upload
		verifyDescription=False,  # Don't ask for confirmation
		keepFilename=True,        # Don't ask for confirmation
		url=[path],               # filename must be list
		useFilename=dest,         # Destination page name
		description=desc).run()


def capture_page(url, out):
	""" Run wkhtmltoimage on +url+, saving the output to +page+ """

	# The --quality parameter is weird: running with a quality of 90 or
	# higher gives whopping file sizes, well above 50M for some pages
	# (instead of about 1M). After running optipng all the file sizes are
	# the same (?!), and I can't see any visual difference either.
	#
	# For fonts: https://github.com/wkhtmltopdf/wkhtmltopdf/issues/2193
	subprocess.check_output(['wkhtmltoimage', '--quiet', '--quality', '50', url, out])

	size = os.stat(out).st_size
	if size < 1024:
		raise Exception('File is very small: {}'.format(size))
	if size > 5242880:  # 5M
		raise Exception('File is very large: {}'.format(size))
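

# The optipng experiment mentioned above, kept as an optional helper. A
# minimal sketch: it assumes optipng is installed and that -o2 is an
# acceptable optimization level; nothing in this bot calls it by default.
def optimize_png(path):
	""" Shrink the PNG at +path+ in-place with optipng. """
	subprocess.check_output(['optipng', '-o2', path])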


def archive_is(url):
	""" Send +url+ off to archive.is for archival, and return the access URL. We
	won't request a re-archive if the page already exists (which is usually
	okay). """

	data = urllib.parse.urlencode([('url', url)])
	with urllib.request.urlopen('http://archive.is/submit/', data=data.encode()) as fp:
		archive = re.search(r'"(http://archive\.is/[a-zA-Z0-9]{3,6})"', fp.read(4096).decode())
		if archive:
			return archive.groups()[0]
		# Fetch existing
		else:
			archive = urllib.request.urlopen('https://archive.is/{}'.format(url))
			soup = BeautifulSoup(archive, 'html.parser')
			return soup.select('#CONTENT .THUMBS-BLOCK a')[0].get('href')
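
# Example round trip (URLs purely illustrative):
#   archive_is('http://www.conservapedia.com/Main_Page')
#   => 'http://archive.is/AbCd'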

_linkdb = None
def notify_archives(url):
	''' Notify archive.is, but only if we've never done this before '''

	global _linkdb
	if _linkdb is None:
		with open('/data/code/rw/linkdb', 'r') as fp:
			_linkdb = fp.readlines()

	if url + '\n' in _linkdb:
		return

	print('  Archive', url)
	print('   ', archive_is(url))

	# Remember the URL both in memory (for this run) and on disk.
	_linkdb.append(url + '\n')
	with open('/data/code/rw/linkdb', 'a') as fp:
		fp.write(url + '\n')


def archive_templates(page):
	''' Find all {{a}} templates on +page+

	Converting refs in Vim:
	  :%s/<ref>\[\(.\{-}\) \(.\{-}\)]\(.\{-}\)<\/ref>/<ref>{{a|\1|\2\3}}<\/ref>/
	'''

	for t in page.templatesWithParams():
		#print(' '*7, 'Found template:', t[0].title())
		if t[0].title() not in ['Template:A', 'User:Carpetsmoker/a']: continue
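		# The {{a}} template is assumed to take the URL as its first
		# positional parameter, e.g. (a hypothetical ref):
		#   <ref>{{a|http://example.com/page|link text}}</ref>
		# so t[1][0] is the URL that gets sent off for archival.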
		if len(t[1]) > 1:
			notify_archives(t[1][0])


def main():
	# We want to make sure there's only one instance of this running.
	lock = '/data/code/rw/.capturebot_running'
	if os.path.exists(lock):
		print('Already have a running instance; doing nothing')
		sys.exit(0)
	open(lock, 'w+').close()
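	# Note: the exists() check plus open() above is not atomic; two instances
	# starting at the same moment could both pass. A stricter variant (just a
	# sketch, not used here) would create the lock exclusively:
	#   fd = os.open(lock, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
	#   os.close(fd)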

	try:
		done = []
		for p in recent_changes():
			if p.title() in done: continue
			done.append(p.title())
			print('=> {}'.format(p.title()))
			find_uncaptured(p)
			archive_templates(p)
			print()

		always_check = ['User:Capturebot4/sandbox']
		for title in always_check:
			if title in done: continue
			p = pywikibot.Page(_site, title)
			print('=> {}'.format(p.title()))
			find_uncaptured(p)
			archive_templates(p)
			print()
	finally:
		os.unlink(lock)


if __name__ == '__main__':
	main()


# The MIT License (MIT)
#
# Copyright © 2016 Martin Tournoij
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the software.