From b50211b2581d93b34db0566a6bb11766d5dea884 Mon Sep 17 00:00:00 2001
From: Dave Brondsema
diff --git a/mediawiki/wikimarkup/__init__.py b/mediawiki/wikimarkup/__init__.py
index b7d86b6..ca51649 100644
--- a/mediawiki/wikimarkup/__init__.py
+++ b/mediawiki/wikimarkup/__init__.py
@@ -17,7 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
mInPre = False
- mLastSection = u'pre'
+ mLastSection = 'pre'
t = t[1:]
else:
# paragraph
- if t.strip() == u'':
+ if t.strip() == '':
if paragraphStack:
- output.append(paragraphStack + u'<br />')
+ output.append(paragraphStack + '<br />')
paragraphStack = False
- mLastSection = u'p'
+ mLastSection = 'p'
else:
- if mLastSection != u'p':
+ if mLastSection != 'p':
output.append(self.closeParagraph(mLastSection))
- mLastSection = u''
+ mLastSection = ''
mInPre = False
- paragraphStack = u'<p><br />'
+ paragraphStack = '<p><br />'
else:
- paragraphStack = u'<p>'
+ paragraphStack = '<p>'
else:
if paragraphStack:
output.append(paragraphStack)
paragraphStack = False
- mLastSection = u'p'
- elif mLastSection != u'p':
- output.append(self.closeParagraph(mLastSection) + u'<p>')
- mLastSection = u'p'
+ mLastSection = 'p'
+ elif mLastSection != 'p':
+ output.append(self.closeParagraph(mLastSection) + '<p>')
+ mLastSection = 'p'
mInPre = False
# somewhere above we forget to get out of pre block (bug 785)
@@ -1587,16 +1588,16 @@ def doBlockLevels(self, text, linestart):
mInPre = False
if paragraphStack == False:
- output.append(t + u"\n")
+ output.append(t + "\n")
while prefixLength:
output.append(self.closeList(pref2[prefixLength-1], mDTopen))
mDTopen = False
prefixLength -= 1
- if mLastSection != u'':
- output.append(u'</' + mLastSection + u'>')
- mLastSection = u''
+ if mLastSection != '':
+ output.append('</' + mLastSection + '>')
+ mLastSection = ''
return ''.join(output)
@@ -1608,8 +1609,8 @@ def __init__(self, show_toc=True):
def parse(self, text):
utf8 = isinstance(text, str)
text = to_unicode(text)
- if text[-1:] != u'\n':
- text = text + u'\n'
+ if text[-1:] != '\n':
+ text = text + '\n'
taggedNewline = True
else:
taggedNewline = False
@@ -1622,32 +1623,32 @@ def parse(self, text):
text = self.parseHeaders(text)
text = self.parseAllQuotes(text)
text = self.replaceExternalLinks(text)
- if not self.show_toc and text.find(u"<!--MWTOC-->") == -1:
+ if not self.show_toc and text.find("<!--MWTOC-->") == -1:
self.show_toc = False
text = self.formatHeadings(text, True)
text = self.unstrip(text)
text = self.fixtags(text)
text = self.doBlockLevels(text, True)
text = self.unstripNoWiki(text)
- text = text.split(u'\n')
- text = u'\n'.join(text)
- if taggedNewline and text[-1:] == u'\n':
+ text = text.split('\n')
+ text = '\n'.join(text)
+ if taggedNewline and text[-1:] == '\n':
text = text[:-1]
if utf8:
return text.encode("utf-8")
return text
def checkTOC(self, text):
- if text.find(u"__NOTOC__") != -1:
- text = text.replace(u"__NOTOC__", u"")
+ if text.find("__NOTOC__") != -1:
+ text = text.replace("__NOTOC__", "")
self.show_toc = False
- if text.find(u"__TOC__") != -1:
- text = text.replace(u"__TOC__", u"")
+ if text.find("__TOC__") != -1:
+ text = text.replace("__TOC__", "")
self.show_toc = True
return text
def doTableStuff(self, text):
- t = text.split(u"\n")
+ t = text.split("\n")
td = []   # Is currently a td tag open?
ltd = []  # Was it TD or TH?
tr = []   # Is currently a tr tag open?
@@ -1664,96 +1665,96 @@ def doTableStuff(self, text):
attributes = self.unstripForHTML(matches.group(2))
- t[k] = u'
@@ -38,6 +38,7 @@
You can italicize text by putting 2 apostrophes on each side.
3 apostrophes will embolden the text.
5 apostrophes will embolden and italicize the text.
(4 apostrophes don't do anything special -- there's just 'one left over'.)
+ unicodË
You can give link to the other Web page over the Internet easily Visit Google
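The u''-to-'' conversions above are safe on Python 3, where the u prefix is accepted but redundant, and on Python 2 only when from __future__ import unicode_literals is in effect (mediawiki/wiki.py below imports it; this sketch assumes mediawiki/wikimarkup/__init__.py does as well). The new unicodË line simply gives the test data a non-ASCII character to push through the parser. A minimal illustration of why the prefix can go:

    # Sketch only: with unicode_literals, bare literals are already text on
    # Python 2, so dropping the u'' prefix does not change the string's type.
    from __future__ import unicode_literals

    import six

    mLastSection = 'pre'                      # same object type as u'pre'
    assert isinstance(mLastSection, six.text_type)
    assert 'unicod\xcb' == u'unicod\xcb'      # the two spellings are interchangeable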
diff --git a/mediawiki/wiki.py b/mediawiki/wiki.py
index e2860e5..d49d61c 100644
--- a/mediawiki/wiki.py
+++ b/mediawiki/wiki.py
@@ -21,12 +21,6 @@
from __future__ import unicode_literals
from __future__ import absolute_import
import re
-import random
-import locale
-
-from base64 import b64encode
-from base64 import b64decode
-from StringIO import StringIO
from . import wikimarkup
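The removed imports are Python-2-only or apparently unused in this module; StringIO in particular does not exist under that name on Python 3. If an in-memory text buffer were still needed, the portable spelling is six.StringIO (an alias for io.StringIO). This is an illustration of the replacements, not part of the patch:

    import io
    import six

    buf = six.StringIO()            # io.StringIO on both Python 2 and 3
    buf.write(u'rendered markup')
    assert buf.getvalue() == u'rendered markup'

    raw = io.BytesIO(b'raw bytes')  # use BytesIO when byte semantics are required
    assert raw.read() == b'raw bytes'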
diff --git a/mediawiki/wikimarkup/__init__.py b/mediawiki/wikimarkup/__init__.py
index a1e8e8a..8676635 100644
--- a/mediawiki/wikimarkup/__init__.py
+++ b/mediawiki/wikimarkup/__init__.py
@@ -1610,7 +1610,7 @@ def __init__(self, show_toc=True):
self.show_toc = show_toc
def parse(self, text):
- utf8 = isinstance(text, str)
+ utf8 = isinstance(text, six.binary_type)
text = to_unicode(text)
if text[-1:] != '\n':
text = text + '\n'
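parse() uses the utf8 flag to decide whether to encode the result back to bytes before returning it. On Python 2, str is the byte type, so isinstance(text, str) asked "did the caller pass bytes?"; on Python 3 the same test matches ordinary text and inverts the flag. six.binary_type (str on 2, bytes on 3) keeps the original meaning. A small hedged illustration:

    import six

    def caller_passed_bytes(text):
        # Mirrors the patched check: only byte input should be re-encoded on return.
        return isinstance(text, six.binary_type)

    assert caller_passed_bytes(b'== Heading ==') is True
    assert caller_passed_bytes(u'== Heading ==') is False
    # The old isinstance(text, str) test is True for the second call on Python 3,
    # which would make parse() return bytes to a caller that passed in text.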
@@ -1859,7 +1859,7 @@ def formatHeadings(self, text, isMain):
prevlevel = level
prevtoclevel = toclevel
- level = matches[headlineCount][0]
+ level = int(matches[headlineCount][0])
if doNumberHeadings or enoughToc:
if level > prevlevel:
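matches comes from a regular expression over the headings, so the captured level is the string '2' rather than the integer 2. Python 2 let the later level > prevlevel comparison pass silently (a str always compares greater than an int there), while Python 3 raises TypeError; the int() cast restores a numeric comparison. An illustrative snippet, using a stand-in pattern rather than the parser's actual heading regex:

    import re

    m = re.match(r'<h(\d)>', '<h2>Section</h2>')
    level = m.group(1)              # '2' -- a regex group is always a string
    prevlevel = 1

    # level > prevlevel raises TypeError on Python 3 and orders by type on Python 2
    assert int(level) > prevlevel   # the patched line casts before comparing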
@@ -2072,12 +2072,12 @@ def to_unicode(text, charset=None):
return ' '.join([to_unicode(arg) for arg in text.args])
return six.text_type(text)
if charset:
- return six.text_type(text, charset, 'replace')
+ return six.ensure_text(text, charset, 'replace')
else:
try:
- return six.text_type(text, 'utf-8')
+ return six.ensure_text(text, 'utf-8')
except UnicodeError:
- return six.text_type(text, locale.getpreferredencoding(), 'replace')
+ return six.ensure_text(text, locale.getpreferredencoding(), 'replace')
# tag hooks
mTagHooks = {}
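six.text_type is simply unicode/str, so six.text_type(text, charset, 'replace') only works while text is still a bytes object; if to_unicode is handed something already decoded, Python 3 raises TypeError: decoding str is not supported. six.ensure_text decodes bytes and passes text through unchanged, which is exactly what this helper wants. A short sketch of the difference:

    import six

    assert six.ensure_text(b'caf\xc3\xa9', 'utf-8') == u'caf\xe9'   # bytes get decoded
    assert six.ensure_text(u'caf\xe9', 'utf-8') == u'caf\xe9'       # text passes through

    try:
        six.text_type(u'caf\xe9', 'utf-8')   # the old call path
    except TypeError:
        pass   # "decoding str is not supported" on Python 3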
@@ -2120,10 +2120,6 @@ def str2url(str):
Takes a UTF-8 string and replaces all characters with the equivalent in 7-bit
ASCII. It returns a plain ASCII string usable in URLs.
"""
- try:
- str = str.encode('utf-8')
- except:
- pass
mfrom = "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝßàáâãäåæçèéêëìíîï"
to = "AAAAAAECEEEEIIIIDNOOOOOOUUUUYSaaaaaaaceeeeiiii"
mfrom += "ñòóôõöøùúûüýÿĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠġĢģ"
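With unicode_literals in effect (assumed here), the mfrom/to tables below are text, and the mapping they describe is per character; encoding the input to UTF-8 first would turn each accented character into two bytes that can never match a one-character table entry, so the removed try/encode block would have broken str2url on Python 3. A hedged sketch of the kind of per-character folding these tables feed (the helper name and the use of str.translate are illustrative, not the module's actual implementation):

    # -*- coding: utf-8 -*-   (declaration needed if run as a Python 2 source file)
    def ascii_fold(text, mfrom, to):
        # Map each accented code point to its ASCII stand-in; operates on text, not bytes.
        return text.translate({ord(src): dst for src, dst in zip(mfrom, to)})

    assert ascii_fold(u'École', u'ÈÉÊË', u'EEEE') == u'Ecole'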
From 3bc4daf76234bfdaa1d434ebf5a9d5c98db96a42 Mon Sep 17 00:00:00 2001
From: Dave Brondsema