[kupfer: 2/4] plugin.text: Allow open URLs in Text



commit c8d45979d66485769c57ab85412464a07681106e
Author: Ulrik Sverdrup <ulrik sverdrup gmail com>
Date:   Thu Nov 5 23:40:57 2009 +0100

    plugin.text: Allow open URLs in Text
    
    Add the action OpenTextUrl to open web addresses in Text. At the same
    time we "reduce" the URL matching in free text to produce UrlLeaves
    only for "real" URLs (with http:// etc.), while fuzzy URLs are just
    treated as text (now with the OpenTextUrl action).
    
    This makes for fewer surprises if the user *doesn't* intend to type in
    an URL.
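
A minimal sketch of the distinction described above (not part of the patch;
the input strings are made-up examples). With Python 2's urlparse, as the
plugin uses it, only text typed with an explicit scheme parses with a
non-empty first component, which is what URLTextSource now requires before
it yields a UrlLeaf:

	from urlparse import urlparse

	for text in ("http://example.com", "example.com"):
		scheme = urlparse(text)[0]  # empty unless "http://" etc. was typed
		if scheme:
			print "%s: real URL -> UrlLeaf" % text
		else:
			print "%s: stays plain text; OpenTextUrl may still apply" % text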

 kupfer/plugin/text.py |   55 +++++++++++++++++++++++++++++++++++++-----------
 1 files changed, 42 insertions(+), 13 deletions(-)
---
diff --git a/kupfer/plugin/text.py b/kupfer/plugin/text.py
index 71a6ff7..74b94ad 100644
--- a/kupfer/plugin/text.py
+++ b/kupfer/plugin/text.py
@@ -3,11 +3,13 @@ from urlparse import urlparse, urlunparse
 
 import gobject
 
-from kupfer.objects import TextSource, TextLeaf, FileLeaf, UrlLeaf
+from kupfer.objects import TextSource, TextLeaf, FileLeaf, UrlLeaf, OpenUrl
+from kupfer import utils
 
 __kupfer_name__ = _("Free-text Queries")
 __kupfer_sources__ = ()
 __kupfer_text_sources__ = ("BasicTextSource", "PathTextSource", "URLTextSource",)
+__kupfer_actions__ = ("OpenTextUrl", )
 __description__ = _("Basic support for free-text queries")
 __version__ = ""
 __author__ = "Ulrik Sverdrup <ulrik sverdrup gmail com>"
@@ -43,6 +45,39 @@ class PathTextSource (TextSource):
 	def provides(self):
 		yield FileLeaf
 
+def is_url(text):
+	"""If @text is an URL, return a cleaned-up URL, else return None"""
+	text = text.strip()
+	components = list(urlparse(text))
+	domain = "".join(components[1:])
+	dotparts = domain.rsplit(".")
+
+	# 1. Domain name part is one word (without spaces)
+	# 2. Urlparse parses a scheme (http://), else we apply heuristics
+	if len(domain.split()) == 1 and (components[0] or ("." in domain and
+		len(dotparts) >= 2 and len(dotparts[-1]) >= 2 and
+		any(char.isalpha() for char in domain) and
+		all(part[:1].isalnum() for part in dotparts))):
+		if not components[0]:
+			url = "http://" + "".join(components[1:])
+		else:
+			url = text
+		name = ("".join(components[1:3])).strip("/")
+		if name:
+			return url
+
+class OpenTextUrl (OpenUrl):
+	rank_adjust = 10
+
+	def activate(self, leaf):
+		url = is_url(leaf.object)
+		utils.show_url(url)
+
+	def item_types(self):
+		yield TextLeaf
+	def valid_for_item(self, leaf):
+		return is_url(leaf.object)
+
 class URLTextSource (TextSource):
 	"""detect URLs and webpages"""
 	def __init__(self):
@@ -51,23 +86,17 @@ class URLTextSource (TextSource):
 	def get_rank(self):
 		return 75
 	def get_items(self, text):
+		# Only detect "perfect" URLs
 		text = text.strip()
 		components = list(urlparse(text))
 		domain = "".join(components[1:])
-		dotparts = domain.rsplit(".")
-
-		# 1. Domain name part is one word (without spaces)
-		# 2. Urlparse parses a scheme (http://), else we apply heuristics
-		if len(domain.split()) == 1 and (components[0] or ("." in domain and
-			len(dotparts) >= 2 and len(dotparts[-1]) >= 2 and
-			any(char.isalpha() for char in domain) and
-			all(part[:1].isalnum() for part in dotparts))):
-			if not components[0]:
-				url = "http://" + "".join(components[1:])
-			else:
-				url = text
+
+		# If urlparse parses a scheme (http://), it's an URL
+		if len(domain.split()) == 1 and components[0]:
+			url = text
 			name = ("".join(components[1:3])).strip("/")
 			if name:
 				yield UrlLeaf(url, name=name)
+
 	def provides(self):
 		yield UrlLeaf
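
A standalone sketch of the scheme-less heuristic that is_url() applies, and
that OpenTextUrl relies on through valid_for_item(); the helper name and the
sample strings below are invented for illustration:

	from urlparse import urlparse

	def looks_like_fuzzy_url(text):
		# Mirrors the checks in is_url() for text without a scheme: one
		# word containing a dot-separated domain whose last part has at
		# least two characters, with at least one letter overall, and
		# every dot-part starting with an alphanumeric character.
		components = list(urlparse(text.strip()))
		domain = "".join(components[1:])
		dotparts = domain.rsplit(".")
		return (len(domain.split()) == 1 and "." in domain and
			len(dotparts) >= 2 and len(dotparts[-1]) >= 2 and
			any(char.isalpha() for char in domain) and
			all(part[:1].isalnum() for part in dotparts))

	print looks_like_fuzzy_url("gnome.org")   # True: OpenTextUrl is offered
	print looks_like_fuzzy_url("not a url")   # False: remains plain text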


