/*
* This code is heavily based on Arc90's readability.js (1.7.1) script
* available at: http://code.google.com/p/arc90labs-readability
*
* readability.js is licensed under the Apache License, Version 2.0
* Copyright (c) 2010 Arc90 Inc
**/
// XPCOM shorthands conventionally used in Firefox chrome code.
let Cc = Components.classes;
let Ci = Components.interfaces;
let Cu = Components.utils;

Cu.import("resource://gre/modules/Services.jsm");

// Log a tagged message to the browser console. Deliberately shadows the
// global dump() so all readability logging goes through one place.
function dump(s) {
  Services.console.logStringMessage("Reader: (Readability) " + s);
}
/**
 * Readability converts a document into a clean, readable article.
 *
 * @param nsIURI uri  the location of the document being parsed
 * @param Document doc  the DOM document to operate on
 **/
var Readability = function(uri, doc) {
  this._uri = uri;
  this._doc = doc;
  this._biggestFrame = false;

  // Pagination state: we always begin on the first page.
  this._curPageNum = 1;

  // Pages parsed during this run, keyed for constant-time duplicate lookup.
  this._parsedPages = {};

  // ETag headers of pages we've fetched; a matching ETag marks a duplicate.
  this._pageETags = {};

  // Start with every processing flag set; later passes may relax them.
  this._flags = this.FLAG_STRIP_UNLIKELYS |
                this.FLAG_WEIGHT_CLASSES |
                this.FLAG_CLEAN_CONDITIONALLY;
}
Readability.prototype = {
FLAG_STRIP_UNLIKELYS: 0x1,
FLAG_WEIGHT_CLASSES: 0x2,
FLAG_CLEAN_CONDITIONALLY: 0x4,
FLAG_READABILITY_CHECK: 0x8,
// The maximum number of pages to loop through before we call
// it quits and just show a link.
MAX_PAGES: 5,
// The number of iterations processed before yielding.
GEN_ITERATIONS: 100,
// All of the regular expressions in use within readability.
// Defined up here so we don't instantiate them repeatedly in loops.
REGEXPS: {
unlikelyCandidates: /combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter/i,
okMaybeItsACandidate: /and|article|body|column|main|shadow/i,
positive: /article|body|content|entry|hentry|main|page|pagination|post|text|blog|story/i,
negative: /combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget/i,
extraneous: /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single/i,
divToPElements: /<(a|blockquote|dl|div|img|ol|p|pre|table|ul)/i,
replaceBrs: /(
]*>[ \n\r\t]*){2,}/gi,
replaceFonts: /<(\/?)font[^>]*>/gi,
trim: /^\s+|\s+$/g,
normalize: /\s{2,}/g,
killBreaks: /(
(\s| ?)*){1,}/g,
videos: /http:\/\/(www\.)?(youtube|vimeo)\.com/i,
nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i,
prevLink: /(prev|earl|old|new|<|«)/i
},
/**
 * Run any post-process modifications to article content as necessary.
 * Currently this un-floats over-wide images, then rewrites relative URIs
 * to absolute ones (order matters: both walk the same subtree).
 *
 * @param Element articleContent the cleaned-up article root
 * @return void
 **/
_postProcessContent: function(articleContent) {
this._fixImageFloats(articleContent);
// Readability cannot open relative uris so we convert them to absolute uris.
this._fixRelativeUris(articleContent);
},
/**
* Some content ends up looking ugly if the image is too large to be floated.
* If the image is wider than a threshold (currently 55%), no longer float it,
* center it instead.
*
* @param Element
* @return void
**/
_fixImageFloats: function(articleContent) {
let imageWidthThreshold = Math.min(articleContent.offsetWidth, 800) * 0.55;
let images = articleContent.getElementsByTagName('img');
for (let i = 0, il = images.length; i < il; i += 1) {
let image = images[i];
if (image.offsetWidth > imageWidthThreshold)
image.className += " blockImage";
}
},
/**
* Converts each and uri in the given element to an absolute URI.
*
* @param Element
* @return void
*/
_fixRelativeUris: function(articleContent) {
let baseUri = this._uri;
// Fix links.
let links = articleContent.getElementsByTagName('a');
for (let i = links.length - 1; i >= 0; i--) {
links[i].href = this._newURIErrorWrapper(links[i].href, baseUri);
}
// Fix images.
let images = articleContent.getElementsByTagName('img');
for (let i = images.length - 1; i >= 0; i--) {
images[i].src = this._newURIErrorWrapper(images[i].src, baseUri);
}
},
/**
* Converts the given parameters into a new nsIURI object and returns the "spec" attribute of it.
* Catches errors of the newURI method and returns an appropriate value.
*
* @param string
* @param nsIURI
* @return string
*/
_newURIErrorWrapper: function(aSpec, aBaseURI) {
try {
return Services.io.newURI(aSpec, null, aBaseURI).spec;
} catch (err) {
dump("_newURIErrorWrapper: " + err.message);
return "";
}
},
/**
 * Get the article title as an H1 element.
 *
 * Heuristics: start from document.title; strip site-name parts separated
 * by " | " or " - ", or the part before ": "; fall back to a lone h1 when
 * the title length looks implausible; and finally revert to the original
 * title if the trimmed candidate ends up with four words or fewer.
 *
 * @return Element a new H1 node containing the chosen title
 **/
_getArticleTitle: function() {
let doc = this._doc;
let curTitle = "";
let origTitle = "";
try {
curTitle = origTitle = doc.title;
// If they had an element with id "title" in their HTML
if (typeof curTitle !== "string")
curTitle = origTitle = this._getInnerText(doc.getElementsByTagName('title')[0]);
} catch(e) {}
// "Headline | Site Name" style: keep the part before the separator,
// unless that leaves fewer than three words.
if (curTitle.match(/ [\|\-] /)) {
curTitle = origTitle.replace(/(.*)[\|\-] .*/gi,'$1');
if (curTitle.split(' ').length < 3)
curTitle = origTitle.replace(/[^\|\-]*[\|\-](.*)/gi,'$1');
} else if (curTitle.indexOf(': ') !== -1) {
// "Site: Headline" style: keep the part after the colon.
curTitle = origTitle.replace(/.*:(.*)/gi, '$1');
if (curTitle.split(' ').length < 3)
curTitle = origTitle.replace(/[^:]*[:](.*)/gi,'$1');
} else if (curTitle.length > 150 || curTitle.length < 15) {
// Implausible length: prefer the page's h1 if there is exactly one.
let hOnes = doc.getElementsByTagName('h1');
if (hOnes.length === 1)
curTitle = this._getInnerText(hOnes[0]);
}
curTitle = curTitle.replace(this.REGEXPS.trim, "");
// If too few words survived, the heuristics probably ate the real title.
if (curTitle.split(' ').length <= 4)
curTitle = origTitle;
let articleTitle = doc.createElement("H1");
articleTitle.innerHTML = curTitle;
return articleTitle;
},
/**
* Prepare the HTML document for readability to scrape it.
* This includes things like stripping javascript, CSS, and handling terrible markup.
*
* @return void
**/
_prepDocument: function() {
if (this._flagIsActive(this.FLAG_READABILITY_CHECK))
return;
let doc = this._doc;
// In some cases a body element can't be found (if the HTML is
// totally hosed for example) so we create a new body node and
// append it to the document.
if (doc.body === null) {
let body = doc.createElement("body");
try {
doc.body = body;
} catch(e) {
doc.documentElement.appendChild(body);
dump(e);
}
}
let frames = doc.getElementsByTagName('frame');
if (frames.length > 0) {
let bestFrame = null;
// The frame to try to run readability upon. Must be on same domain.
let bestFrameSize = 0;
// Used for the error message. Can be on any domain.
let biggestFrameSize = 0;
for (let frameIndex = 0; frameIndex < frames.length; frameIndex += 1) {
let frameSize = frames[frameIndex].offsetWidth + frames[frameIndex].offsetHeight;
let canAccessFrame = false;
try {
let frameBody = frames[frameIndex].contentWindow.document.body;
canAccessFrame = true;
} catch(eFrames) {
dump(eFrames);
}
if (frameSize > biggestFrameSize) {
biggestFrameSize = frameSize;
this._biggestFrame = frames[frameIndex];
}
if (canAccessFrame && frameSize > bestFrameSize) {
bestFrame = frames[frameIndex];
bestFrameSize = frameSize;
}
}
if (bestFrame) {
let newBody = doc.createElement('body');
newBody.innerHTML = bestFrame.contentWindow.document.body.innerHTML;
newBody.style.overflow = 'scroll';
doc.body = newBody;
let frameset = doc.getElementsByTagName('frameset')[0];
if (frameset) {
frameset.parentNode.removeChild(frameset);
}
}
}
// Remove all stylesheets
for (let k = 0; k < doc.styleSheets.length; k += 1) {
doc.styleSheets[k].disabled = true;
}
// Remove all style tags in head
let styleTags = doc.getElementsByTagName("style");
for (let st = 0; st < styleTags.length; st += 1) {
styleTags[st].textContent = "";
}
// Turn all double br's into p's. Note, this is pretty costly as far
// as processing goes. Maybe optimize later.
doc.body.innerHTML =
doc.body.innerHTML.replace(this.REGEXPS.replaceBrs, '
'). replace(this.REGEXPS.replaceFonts, '<$1span>'); }, /** * Prepare the article node for display. Clean out any inline styles, * iframes, forms, strip extraneous
tags, etc.
*
* @param Element
* @return void
**/
_prepArticle: function(articleContent) {
this._cleanStyles(this._doc, articleContent);
this._killBreaks(articleContent);
// Clean out junk from the article content
this._cleanConditionally(articleContent, "form");
this._clean(articleContent, "object");
this._clean(articleContent, "h1");
// If there is only one h2, they are probably using it as a header
// and not a subheader, so remove it since we already have a header.
if (articleContent.getElementsByTagName('h2').length === 1)
this._clean(articleContent, "h2");
this._clean(articleContent, "iframe");
this._cleanHeaders(articleContent);
// Do these last as the previous stuff may have removed junk
// that will affect these
this._cleanConditionally(articleContent, "table");
this._cleanConditionally(articleContent, "ul");
this._cleanConditionally(articleContent, "div");
// Remove extra paragraphs
let articleParagraphs = articleContent.getElementsByTagName('p');
for (let i = articleParagraphs.length - 1; i >= 0; i -= 1) {
let imgCount = articleParagraphs[i].getElementsByTagName('img').length;
let embedCount = articleParagraphs[i].getElementsByTagName('embed').length;
let objectCount = articleParagraphs[i].getElementsByTagName('object').length;
if (imgCount === 0 &&
embedCount === 0 &&
objectCount === 0 &&
this._getInnerText(articleParagraphs[i], false) === '') {
articleParagraphs[i].parentNode.removeChild(articleParagraphs[i]);
}
}
articleContent.innerHTML = articleContent.innerHTML.replace(/
]*>\s*
topCandidate.readability.contentScore) { topCandidate = candidates[c]; } yield; } // If we still have no top candidate, just use the body as a last resort. // We also have to copy the body node so it is something we can modify. if (topCandidate === null || topCandidate.tagName === "BODY") { // If we couldn't find a candidate for article content at this point, // it's very unlikely to be a convertible page, just bail the check. if (isChecking) { dump('No top candidate found, failed readability check'); yield null; } topCandidate = doc.createElement("DIV"); topCandidate.innerHTML = page.innerHTML; page.innerHTML = ""; page.appendChild(topCandidate); this._initializeNode(topCandidate); } else if (isChecking) { dump('Found a top candidate, passed readability check'); // Just return a non-null value, no need to post-process the article content // as we're just checking for readability. yield {}; } // Now that we have the top candidate, look through its siblings for content // that might also be related. Things like preambles, content split by ads // that we removed, etc. let articleContent = doc.createElement("DIV"); if (isPaging) articleContent.id = "readability-content"; let siblingScoreThreshold = Math.max(10, topCandidate.readability.contentScore * 0.2); let siblingNodes = topCandidate.parentNode.childNodes; for (let s = 0, sl = siblingNodes.length; s < sl; s += 1) { let siblingNode = siblingNodes[s]; let append = false; dump("Looking at sibling node: " + siblingNode + " (" + siblingNode.className + ":" + siblingNode.id + ")" + ((typeof siblingNode.readability !== 'undefined') ? (" with score " + siblingNode.readability.contentScore) : '')); dump("Sibling has score " + (siblingNode.readability ? 
siblingNode.readability.contentScore : 'Unknown')); if (siblingNode === topCandidate) append = true; let contentBonus = 0; // Give a bonus if sibling nodes and top candidates have the example same classname if (siblingNode.className === topCandidate.className && topCandidate.className !== "") contentBonus += topCandidate.readability.contentScore * 0.2; if (typeof siblingNode.readability !== 'undefined' && (siblingNode.readability.contentScore+contentBonus) >= siblingScoreThreshold) { append = true; } if (siblingNode.nodeName === "P") { let linkDensity = this._getLinkDensity(siblingNode); let nodeContent = this._getInnerText(siblingNode); let nodeLength = nodeContent.length; if (nodeLength > 80 && linkDensity < 0.25) { append = true; } else if (nodeLength < 80 && linkDensity === 0 && nodeContent.search(/\.( |$)/) !== -1) { append = true; } } if (append) { dump("Appending node: " + siblingNode); let nodeToAppend = null; if (siblingNode.nodeName !== "DIV" && siblingNode.nodeName !== "P") { // We have a node that isn't a common block level element, like a form or td tag. // Turn it into a div so it doesn't get filtered out later by accident. */ dump("Altering siblingNode of " + siblingNode.nodeName + ' to div.'); nodeToAppend = doc.createElement("DIV"); nodeToAppend.id = siblingNode.id; nodeToAppend.innerHTML = siblingNode.innerHTML; } else { nodeToAppend = siblingNode; s -= 1; sl -= 1; } // To ensure a node does not interfere with readability styles, // remove its classnames. nodeToAppend.className = ""; // Append sibling and subtract from our list because it removes // the node when you append to another node. articleContent.appendChild(nodeToAppend); } yield; } // So we have all of the content that we need. Now we clean it up for presentation. this._prepArticle(articleContent); yield; if (this._curPageNum === 1) articleContent.innerHTML = '
§
'; doc.getElementById("readability-content").appendChild(articlePage); if (this._curPageNum > this.MAX_PAGES) { let nextPageMarkup = ""; articlePage.innerHTML = articlePage.innerHTML + nextPageMarkup; return; } // Now that we've built the article page DOM element, get the page content // asynchronously and load the cleaned content into the div we created for it. (function(pageUrl, thisPage) { this._ajax(pageUrl, { success: function(r) { // First, check to see if we have a matching ETag in headers - if we do, this is a duplicate page. let eTag = r.getResponseHeader('ETag'); if (eTag) { if (eTag in this._pageETags) { dump("Exact duplicate page found via ETag. Aborting."); articlePage.style.display = 'none'; return; } else { this._pageETags[eTag] = 1; } } // TODO: this ends up doubling up page numbers on NYTimes articles. Need to generically parse those away. let page = doc.createElement("DIV"); // Do some preprocessing to our HTML to make it ready for appending. // - Remove any script tags. Swap and reswap newlines with a unicode // character because multiline regex doesn't work in javascript. // - Turn any noscript tags into divs so that we can parse them. This // allows us to find any next page links hidden via javascript. // - Turn all double br's into p's - was handled by prepDocument in the original view. // Maybe in the future abstract out prepDocument to work for both the original document // and AJAX-added pages. let responseHtml = r.responseText.replace(/\n/g,'\uffff').replace(/');
responseHtml = responseHtml.replace(this.REGEXPS.replaceFonts, '<$1span>');
page.innerHTML = responseHtml;
// Reset all flags for the next page, as they will search through it and
// disable as necessary at the end of grabArticle.
this._flags = 0x1 | 0x2 | 0x4;
let nextPageLink = this._findNextPageLink(page);
// NOTE: if we end up supporting _appendNextPage(), we'll need to
// change this call to be async
let content = this._grabArticle(page);
if (!content) {
dump("No content found in page to append. Aborting.");
return;
}
// Anti-duplicate mechanism. Essentially, get the first paragraph of our new page.
// Compare it against all of the the previous document's we've gotten. If the previous
// document contains exactly the innerHTML of this first paragraph, it's probably a duplicate.
let firstP = content.getElementsByTagName("P").length ? content.getElementsByTagName("P")[0] : null;
if (firstP && firstP.innerHTML.length > 100) {
for (let i = 1; i <= this._curPageNum; i += 1) {
let rPage = doc.getElementById('readability-page-' + i);
if (rPage && rPage.innerHTML.indexOf(firstP.innerHTML) !== -1) {
dump('Duplicate of page ' + i + ' - skipping.');
articlePage.style.display = 'none';
this._parsedPages[pageUrl] = true;
return;
}
}
}
this._removeScripts(content);
thisPage.innerHTML = thisPage.innerHTML + content.innerHTML;
// After the page has rendered, post process the content. This delay is necessary because,
// in webkit at least, offsetWidth is not set in time to determine image width. We have to
// wait a little bit for reflow to finish before we can fix floating images.
setTimeout((function() {
this._postProcessContent(thisPage);
}).bind(this), 500);
if (nextPageLink)
this._appendNextPage(nextPageLink);
}
});
}).bind(this)(nextPageLink, articlePage);
},
/**
* Get an elements class/id weight. Uses regular expressions to tell if this
* element looks good or bad.
*
* @param Element
* @return number (Integer)
**/
_getClassWeight: function(e) {
if (!this._flagIsActive(this.FLAG_WEIGHT_CLASSES))
return 0;
let weight = 0;
// Look for a special classname
if (typeof(e.className) === 'string' && e.className !== '') {
if (e.className.search(this.REGEXPS.negative) !== -1)
weight -= 25;
if (e.className.search(this.REGEXPS.positive) !== -1)
weight += 25;
}
// Look for a special ID
if (typeof(e.id) === 'string' && e.id !== '') {
if (e.id.search(this.REGEXPS.negative) !== -1)
weight -= 25;
if (e.id.search(this.REGEXPS.positive) !== -1)
weight += 25;
}
return weight;
},
/**
* Remove extraneous break tags from a node.
*
* @param Element
* @return void
**/
_killBreaks: function(e) {
e.innerHTML = e.innerHTML.replace(this.REGEXPS.killBreaks,'
');
},
/**
* Clean a node of all elements of type "tag".
* (Unless it's a youtube/vimeo video. People love movies.)
*
* @param Element
* @param string tag to clean
* @return void
**/
_clean: function(e, tag) {
let targetList = e.getElementsByTagName(tag);
let isEmbed = (tag === 'object' || tag === 'embed');
for (let y = targetList.length - 1; y >= 0; y -= 1) {
// Allow youtube and vimeo videos through as people usually want to see those.
if (isEmbed) {
let attributeValues = "";
for (let i = 0, il = targetList[y].attributes.length; i < il; i += 1) {
attributeValues += targetList[y].attributes[i].value + '|';
}
// First, check the elements attributes to see if any of them contain youtube or vimeo
if (attributeValues.search(this.REGEXPS.videos) !== -1)
continue;
// Then check the elements inside this element for the same.
if (targetList[y].innerHTML.search(this.REGEXPS.videos) !== -1)
continue;
}
targetList[y].parentNode.removeChild(targetList[y]);
}
},
/**
* Clean an element of all tags of type "tag" if they look fishy.
* "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc.
*
* @return void
**/
_cleanConditionally: function(e, tag) {
if (!this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY))
return;
let tagsList = e.getElementsByTagName(tag);
let curTagsLength = tagsList.length;
// Gather counts for other typical elements embedded within.
// Traverse backwards so we can remove nodes at the same time
// without effecting the traversal.
//
// TODO: Consider taking into account original contentScore here.
for (let i = curTagsLength-1; i >= 0; i -= 1) {
let weight = this._getClassWeight(tagsList[i]);
let contentScore = (typeof tagsList[i].readability !== 'undefined') ? tagsList[i].this._contentScore : 0;
dump("Cleaning Conditionally " + tagsList[i] + " (" + tagsList[i].className + ":" + tagsList[i].id + ")" + ((typeof tagsList[i].readability !== 'undefined') ? (" with score " + tagsList[i].this._contentScore) : ''));
if (weight + contentScore < 0) {
tagsList[i].parentNode.removeChild(tagsList[i]);
} else if (this._getCharCount(tagsList[i],',') < 10) {
// If there are not very many commas, and the number of
// non-paragraph elements is more than paragraphs or other
// ominous signs, remove the element.
let p = tagsList[i].getElementsByTagName("p").length;
let img = tagsList[i].getElementsByTagName("img").length;
let li = tagsList[i].getElementsByTagName("li").length-100;
let input = tagsList[i].getElementsByTagName("input").length;
let embedCount = 0;
let embeds = tagsList[i].getElementsByTagName("embed");
for (let ei = 0, il = embeds.length; ei < il; ei += 1) {
if (embeds[ei].src.search(this.REGEXPS.videos) === -1)
embedCount += 1;
}
let linkDensity = this._getLinkDensity(tagsList[i]);
let contentLength = this._getInnerText(tagsList[i]).length;
let toRemove = false;
if (img > p) {
toRemove = true;
} else if (li > p && tag !== "ul" && tag !== "ol") {
toRemove = true;
} else if ( input > Math.floor(p/3) ) {
toRemove = true;
} else if (contentLength < 25 && (img === 0 || img > 2) ) {
toRemove = true;
} else if (weight < 25 && linkDensity > 0.2) {
toRemove = true;
} else if (weight >= 25 && linkDensity > 0.5) {
toRemove = true;
} else if ((embedCount === 1 && contentLength < 75) || embedCount > 1) {
toRemove = true;
}
if (toRemove)
tagsList[i].parentNode.removeChild(tagsList[i]);
}
}
},
/**
* Clean out spurious headers from an Element. Checks things like classnames and link density.
*
* @param Element
* @return void
**/
_cleanHeaders: function(e) {
for (let headerIndex = 1; headerIndex < 3; headerIndex += 1) {
let headers = e.getElementsByTagName('h' + headerIndex);
for (let i = headers.length - 1; i >= 0; i -= 1) {
if (this._getClassWeight(headers[i]) < 0 || this._getLinkDensity(headers[i]) > 0.33)
headers[i].parentNode.removeChild(headers[i]);
}
}
},
_flagIsActive: function(flag) {
return (this._flags & flag) > 0;
},
_addFlag: function(flag) {
this._flags = this._flags | flag;
},
_removeFlag: function(flag) {
this._flags = this._flags & ~flag;
},
/**
 * Runs readability.
 *
 * Workflow:
 * 1. Prep the document by removing script tags, css, etc.
 * 2. Build readability's DOM tree.
 * 3. Grab the article content from the current dom tree.
 * 4. Replace the current DOM tree with the new one.
 * 5. Read peacefully.
 *
 * @param function callback invoked asynchronously with
 *        { title, content } on success, {} in check-only mode,
 *        or null when no article could be extracted
 * @return void
 **/
parse: function (callback) {
let uri = this._uri;
// Bail on bare site roots (spec is just prePath + "/"): front pages
// are not articles.
if ((uri.prePath + "/") === uri.spec) {
callback(null);
return;
}
// Remove script tags from the document.
this._removeScripts(this._doc);
// FIXME: Disabled multi-page article support for now as it
// needs more work on infrastructure.
// Make sure this document is added to the list of parsed pages first,
// so we don't double up on the first page.
// this._parsedPages[uri.spec.replace(/\/$/, '')] = true;
// Pull out any possible next page link first.
// let nextPageLink = this._findNextPageLink(doc.body);
this._prepDocument();
// Grab the title before extraction mutates the tree further.
let articleTitle = this._getArticleTitle();
this._grabArticle(function (articleContent) {
if (!articleContent) {
callback(null);
return;
}
// If we're simply checking whether the document is convertible
// or not, we don't need to do any post-processing on the article
// content, just return a non-null value (see check() method)
if (this._flagIsActive(this.FLAG_READABILITY_CHECK)) {
callback({});
return;
}
this._postProcessContent(articleContent);
// if (nextPageLink) {
// // Append any additional pages after a small timeout so that people
// // can start reading without having to wait for this to finish processing.
// setTimeout((function() {
// this._appendNextPage(nextPageLink);
// }).bind(this), 500);
// }
callback({ title: this._getInnerText(articleTitle),
content: articleContent.innerHTML });
}.bind(this));
},
check: function (callback) {
// Set proper flags for parsing document in readability check mode, skipping
// any DOM manipulation.
this._flags = this.FLAG_READABILITY_CHECK;
this.parse(function (result) {
callback(result != null);
});
}
};