Official Kodi Wiki:Add-on bot

Before you can run these scripts on this wiki, you must get permission from a wiki administrator.

The following Python scripts can be used with Pywikipediabot to update this wiki's add-on pages from the addons.xml file of an add-on repository.

1 addons.py

Bot script to create or update add-on pages for a given repo.
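Both scripts fetch a repository's addons.xml over HTTP and parse it with BeautifulStoneSoup. A minimal sketch of that fetch-and-parse step (assuming Python 2 with BeautifulSoup 3 installed, which the scripts below also require; the URL is the Jarvis repo from the script's repoUrls table):

# Minimal sketch: fetch a repo's addons.xml and list add-on IDs and versions.
import urllib2
from BeautifulSoup import BeautifulStoneSoup

url = 'http://mirrors.kodi.tv/addons/jarvis/addons.xml'
soup = BeautifulStoneSoup(urllib2.urlopen(url))
for addon in soup.findAll('addon'):
    print '%s %s' % (addon['id'], addon['version'])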

# -*- coding: utf-8 -*-
"""
Reads addons.xml and creates/updates pages.
Usage: addons.py [repo]
where repo can be one of:
 * Gotham
 * Helix
 * Isengard
 * Jarvis (default)

"""
#
# Copyright (C) 2005-2015 Team Kodi
# http://kodi.tv
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html

import wikipedia as pywikibot
import urllib2, re
from BeautifulSoup import BeautifulStoneSoup # For processing XML

repoUrls={'Gotham':u'http://mirrors.kodi.tv/addons/gotham/',
          'Helix':u'http://mirrors.kodi.tv/addons/helix/',
          'Isengard':u'http://mirrors.kodi.tv/addons/isengard/',
          'Jarvis':u'http://mirrors.kodi.tv/addons/jarvis/',
         }

def UpdateAddons(*args):
    site = pywikibot.getSite()
    try:
        repoUrl = repoUrls[pywikibot.handleArgs(*args)[0]]
    except (IndexError, KeyError):
        # No repo given on the command line, or an unknown one: use the default
        repoUrl = repoUrls['Jarvis']
    pywikibot.output(u'Repo URL: ' + repoUrl)
    soup = importAddonXML(repoUrl + "addons.xml")
    # Iterate over <addon> elements explicitly (iterating soup.addons would
    # also yield the whitespace text nodes between tags)
    for addon in soup.findAll('addon'):
        newtext = None
        addontext = None
        oldtext = None
        iconUrl = None
        # Extract Add-on details from xml
        addon_data = extractAddonData(addon)
        # Which Wiki page are we looking at?
        pagename = 'Add-on:' + addon_data['name']
        #pagename = 'Sandbox'

        # Get content of wiki page
        page = pywikibot.Page(site, pagename)
        try:
            oldtext = page.get(force=False, get_redirect=True, throttle=True, sysop=False, change_edit_time=True)
        except pywikibot.NoPage:
            oldtext = ''
        except pywikibot.IsRedirectPage:
            pywikibot.output(u'%s is a redirect! Skipping..' % pagename)
            continue
        except pywikibot.Error:
            # any other error: report it and skip this add-on
            pywikibot.output(u"Some error, skipping..")
            continue

        if addon_data['icon url'] != u"":
            iconUrl = repoUrl + addon_data['icon url']
        else:
            iconUrl = u""
        # Create Addon template
        try:
            addontext = ("{{Addon \n|Name=" + addon_data['name'] +
                         "\n|provider-name="+ addon_data['provider-name'] +
                         "\n|ID=" + addon_data['id'] +
                         "\n|latest-version=" + addon_data['version']+
                         "\n|extension point=" + addon_data['extension point'] +
                         "\n|provides="+ addon_data['provides'] +
                         "\n|Summary=" + addon_data['summary'] +
                         "\n|Description=" + addon_data['description'] +
                         "\n|Platform=" + addon_data['platform'] +
                         "\n|Language=" + addon_data['language'] +
                         "\n|License=" + addon_data['license'] +
                         "\n|Forum=" + addon_data['forum'] +
                         "\n|Website=" + addon_data['website'] +
                         "\n|Source=" + addon_data['source'] +
                         "\n|Email=" + addon_data['email'] +
                         "\n|broken=" + addon_data['broken'] +
                         "\n|icon url=" + iconUrl + "}}")
        except (KeyError, TypeError):
            pywikibot.output(u"Error building the Addon template string, skipping..")
            continue

        # Replace an existing {{Addon}} template (with an optional Template:
        # or msg: prefix) with the freshly built one
        templateRegex = re.compile(r'\{\{ *([Tt]emplate:|[mM][sS][gG]:)?Addon' +
                                   r'(?P<parameters>\s*\|.+?|) *}}',
                                   re.DOTALL)
        replacedText = re.subn(templateRegex, addontext, oldtext)

        if replacedText[1] > 0:
            newtext = replacedText[0]
        else:
            newtext = addontext + "\n" + oldtext
        # print newtext (debug)
        # pywikibot.output(newtext)

        # Push new page to wiki
        try:
            page.put(newtext, comment='Addon-Bot Update', watchArticle = None, minorEdit = True)
        except pywikibot.LockedPage:
            pywikibot.output(u"Page %s is locked; skipping." % page.aslink())
        except pywikibot.EditConflict:
            pywikibot.output(u'Skipping %s because of edit conflict' % (page.title()))
        except pywikibot.SpamfilterError, error:
            pywikibot.output(u'Cannot change %s because of spam blacklist entry %s' % (page.title(), error.url))
        except Exception:
            pywikibot.output(u"Some error writing to wiki page, skipping..")
            continue
        # break here for testing purposes
        # break


# Converts a soup <addon> element into a dict of template fields
# Prefers English summaries/descriptions when a language is specified
# data: soup addon element from the repo's addons.xml
def extractAddonData(data):

    addon = {'name' : data['name']}
    addon['id'] = data['id']
    addon['version'] = data['version']
    try:
        addon['extension point'] = data.find('extension',library=True)['point']
    except:
        addon['extension point'] = data.extension['point']

    try:
        addon['provider-name'] = u""+data['provider-name'].replace('|',' & ')
    except:
        addon['provider-name'] = u""

    try:
        addon['provides'] = u""+data.find('extension',library=True).provides.string
    except:
        addon['provides'] = u""

    try:
        addon['summary'] = u""+data('summary', lang="en")[0].string
    except:
        try:
            addon['summary'] = u""+data.summary.string
        except:
            addon['summary'] = u""

    try:
        addon['description'] = u""+data('description', lang="en")[0].string
    except:
        try:
            addon['description'] = u""+data.description.string
        except:
            addon['description'] = u""

    # Simple single-valued metadata tags: fall back to u"" when absent
    for field in ('platform', 'language', 'license', 'forum',
                  'website', 'source', 'email', 'broken'):
        tag = data.find(field)
        addon[field] = (u"" + tag.string) if tag is not None and tag.string else u""

    try:
        if data.noicon.string == u"true":
            addon['noicon'] = True
        else:
            addon['noicon'] = False
    except:
        addon['noicon'] = False

    if addon['noicon']:
        addon['icon url'] = u""
    else:
        addon['icon url'] = u''+addon['id']+'/icon.png'

    # Kodi uses [CR] as a line-break marker; convert to real newlines
    addon['summary'] = re.sub(r"\[CR\]", "\n", addon['summary'])
    addon['description'] = re.sub(r"\[CR\]", "\n", addon['description'])
    return addon

# Download addons.xml and return Soup xml class
def importAddonXML(url):
    page = urllib2.urlopen(url)
    return BeautifulStoneSoup(page)

if __name__ == '__main__':
    try:
        UpdateAddons()
    finally:
        pywikibot.stopme()
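
For illustration, here is extractAddonData() applied to a hand-written addons.xml fragment (a hypothetical sample; run this in the same session as the script above so extractAddonData is defined). The English summary is preferred when several languages are present:

from BeautifulSoup import BeautifulStoneSoup

sample = '''<addons>
  <addon id="plugin.video.example" name="Example" version="1.0.0"
         provider-name="Team Kodi">
    <extension point="xbmc.python.pluginsource" library="default.py">
      <provides>video</provides>
    </extension>
    <extension point="xbmc.addon.metadata">
      <summary lang="en">An example add-on</summary>
      <summary lang="de">Ein Beispiel-Add-on</summary>
      <platform>all</platform>
    </extension>
  </addon>
</addons>'''

addon = extractAddonData(BeautifulStoneSoup(sample).addon)
print addon['summary']          # -> An example add-on
print addon['extension point']  # -> xbmc.python.pluginsource
print addon['provides']         # -> video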

2 addons_category.py

Bot script to add or remove repository categories on add-on pages, depending on which repos currently ship each add-on.

# -*- coding: utf-8 -*-
"""
Usage: 


"""
#
# Copyright (C) 2005-2015 Team Kodi
# http://kodi.tv
# 
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# 
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html


import wikipedia as pywikibot
import urllib2, re
from BeautifulSoup import BeautifulStoneSoup # For processing XML
import pagegenerators, catlib

repoUrls={
    'Gotham':u'http://mirrors.kodi.tv/addons/gotham/',
    'Helix':u'http://mirrors.kodi.tv/addons/helix/',
    'Isengard':u'http://mirrors.kodi.tv/addons/isengard/',
    'Jarvis':u'http://mirrors.kodi.tv/addons/jarvis/',
    }

repoCats={
    'Gotham':u'Gotham add-on repository',
    'Helix':u'Helix add-on repository',
    'Isengard':u'Isengard add-on repository',
    'Jarvis':u'Jarvis add-on repository',
    }
	
def UpdateRepoCats(*args):
	site = pywikibot.getSite()
	
	# Download all repos as soup element
	soups = importAllAddonXML()

	# Get all pages in Category All add-ons	
	cat = catlib.Category(site, u'Category:All add-ons')
	pages = cat.articlesList(False)
	allRepoCats = repoCatList(site)
	
	for page in pagegenerators.PreloadingGenerator(pages, 100):
		# Pull the add-on ID out of the page's {{Addon}} template
		match = re.search(r"\|ID=([a-zA-Z0-9_\.\-]+)", page.get())
		if not match:
			pywikibot.output("Can't find addon_id for %s, skipping it..." % page.title())
			continue
		addon_id = match.group(1)
		pywikibot.output("Identifying repos for %s." % addon_id)
		# See which repos ship this add-on and update the categories
		repos = checkInRepo(addon_id, soups)
		addRemoveRepoCats(page, repos, allRepoCats)

def repoCatList(site):
	CatList = []
	for repoName, repoCat in repoCats.iteritems():
		CatList.append(catlib.Category(site, 'Category:'+ repoCat))
	return CatList

def addRemoveRepoCats(article, repos, allRepoCats, comment=None):
	# Create list of repos to be removed
	notRepos = []

	if not article.canBeEdited():
		pywikibot.output("Can't edit %s, skipping it..." % article.aslink())
		return False	
	
	cats = article.categories(get_redirect=True)	
	site = article.site()
	changesMade = False
	newCatList = []
	newCatSet = set()	

	#remove all repo categories, de-duplicating the remainder
	for cat in cats:
		if cat in allRepoCats:
			changesMade = True
			continue
		if cat.title() not in newCatSet:
			newCatSet.add(cat.title())
			newCatList.append(cat)

	#add the relevant repo categories back
	for repo in repos:
		newCatList.append(catlib.Category(site, 'Category:' + repoCats[repo]))
		changesMade = True
	
	if not changesMade:
		pywikibot.output(u'No changes necessary to %s!' % article.title())
	else:
		text = article.get(get_redirect=True)
		try:
			text = pywikibot.replaceCategoryLinks(text, newCatList)
		except ValueError:
			# replaceCategoryLinks() raises ValueError on interwiki
			# links to self; skip such pages entirely
			pywikibot.output(
				u'Skipping %s because of interwiki link to self' % article)
			return
		try:
			article.put(text, comment='Addon-Bot repo category update', watchArticle = None, minorEdit = True)
		except pywikibot.EditConflict:
			pywikibot.output(
				u'Skipping %s because of edit conflict' % article.title())
		except pywikibot.SpamfilterError, e:
			pywikibot.output(
				u'Skipping %s because of blacklist entry %s'
				% (article.title(), e.url))
		except pywikibot.LockedPage:
			pywikibot.output(
				u'Skipping %s because page is locked' % article.title())
		except pywikibot.PageNotSaved, error:
			pywikibot.output(u"Saving page %s failed: %s"
						 % (article.aslink(), error.message))

def checkInRepo(addon_id, soups):
	repos = [ ]
	for repoName, soup in soups.iteritems():
		if soup.find('addon',id=addon_id):
			repos.append(repoName)
	return repos

def importAllAddonXML():
	soup = { }	
	for repoName, repoUrl in repoUrls.iteritems():
		soup[repoName] = importAddonXML(repoUrl + "addons.xml")
	return soup
	
# Download addons.xml and return Soup xml class
def importAddonXML(url):
	page = urllib2.urlopen(url)
	return BeautifulStoneSoup(page)
	
if __name__ == '__main__':
	try:
		UpdateRepoCats()
	finally:
		pywikibot.stopme()
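
To illustrate the lookup steps above: the regular expression in UpdateRepoCats() pulls the add-on ID out of a page's {{Addon}} template, and checkInRepo() then tests each repo soup for that ID. A small sketch with hypothetical data (run it in the same session as the script above so checkInRepo is defined):

import re
from BeautifulSoup import BeautifulStoneSoup

# Hypothetical wikitext of an add-on page
pagetext = u"{{Addon\n|Name=Example\n|ID=plugin.video.example\n|latest-version=1.0.0}}"
addon_id = re.search(r"\|ID=([a-zA-Z0-9_\.\-]+)", pagetext).group(1)

# Hypothetical repo soups, shaped like the result of importAllAddonXML()
soups = {'Jarvis': BeautifulStoneSoup(
    '<addons><addon id="plugin.video.example" name="Example" version="1.0.0"></addon></addons>')}
print checkInRepo(addon_id, soups)   # -> ['Jarvis']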

3 See also