[devdocsgjs/main: 619/1867] Use Mkdocs as base for RestFramework
- From: Andy Holmes <andyholmes src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [devdocsgjs/main: 619/1867] Use Mkdocs as base for RestFramework
- Date: Fri, 19 Nov 2021 23:47:18 +0000 (UTC)
commit 8fdaf8a33ccd3a4c2358d7f8b8517983c35a3824
Author: Emil Maruszczak <emilekm gmail com>
Date: Thu May 2 17:23:56 2019 +0200
Use Mkdocs as base for RestFramework
lib/docs/scrapers/mkdocs.rb | 16 +++++++++++++++-
lib/docs/scrapers/rest_framework.rb | 15 ++-------------
2 files changed, 17 insertions(+), 14 deletions(-)
---
diff --git a/lib/docs/scrapers/mkdocs.rb b/lib/docs/scrapers/mkdocs.rb
index c0f5d5e5..20559863 100644
--- a/lib/docs/scrapers/mkdocs.rb
+++ b/lib/docs/scrapers/mkdocs.rb
@@ -1,5 +1,19 @@
module Docs
- class Mkdocs < Scraper
+ class Mkdocs < UrlScraper
self.abstract = true
+ self.type = 'mkdocs'
+
+ html_filters.push 'mkdocs/clean_html'
+
+ private
+
+ def handle_response(response)
+ # Some scraped URLs don't have a trailing slash
+ # which leads to page duplication
+ if !response.url.path.ends_with?('/') && !response.url.path.ends_with?('index.html')
+ response.url.path << '/'
+ end
+ super
+ end
end
end
diff --git a/lib/docs/scrapers/rest_framework.rb b/lib/docs/scrapers/rest_framework.rb
index fa64b080..16e85449 100644
--- a/lib/docs/scrapers/rest_framework.rb
+++ b/lib/docs/scrapers/rest_framework.rb
@@ -1,5 +1,5 @@
module Docs
- class RestFramework < UrlScraper
+ class RestFramework < Mkdocs
self.name = 'Django REST Framework'
self.release = '3.9.2'
self.slug = 'rest_framework'
@@ -11,7 +11,7 @@ module Docs
code: 'https://github.com/encode/django-rest-framework'
}
- html_filters.push 'mkdocs/clean_html', 'rest_framework/clean_html', 'rest_framework/entries'
+ html_filters.push 'rest_framework/clean_html', 'rest_framework/entries'
options[:skip_patterns] = [
/\Atopics\//,
@@ -22,16 +22,5 @@ module Docs
Copyright 2011–present Encode OSS Ltd<br>
Licensed under the BSD License.
HTML
-
- private
-
- def handle_response(response)
- # Some scrapped urls don't have ending slash
- # which leads to page duplication
- if !response.url.path.ends_with?('/') && !response.url.path.ends_with?('index.html')
- response.url.path << '/'
- end
- super
- end
end
end
[Date Prev][Date Next] [Thread Prev][Thread Next]
[Thread Index] [Date Index] [Author Index]