From: gnunet
Subject: [www_shared] branch master updated: remove unneeded stuff, structure as package
Date: Fri, 07 May 2021 12:36:20 +0200

This is an automated email from the git hooks/post-receive script.

dold pushed a commit to branch master
in repository www_shared.

The following commit(s) were added to refs/heads/master by this push:
     new d8ec4d3  remove unneeded stuff, structure as package
d8ec4d3 is described below

commit d8ec4d322b5ecb23e992dfa6061dc015c6b4abbf
Author: Florian Dold <florian@dold.me>
AuthorDate: Fri May 7 12:33:45 2021 +0200

    remove unneeded stuff, structure as package
---
 README.text => README            |  0
 make_site.py                     | 28 ++++++++++++
 mybabel.py                       | 25 -----------
 sitegen/__init__.py              |  0
 i18nfix.py => sitegen/i18nfix.py |  0
 site.py => sitegen/site.py       | 53 +++++++++++++++-------
 time.py => sitegen/timeutil.py   |  0
 textproc.py                      | 94 ----------------------------------------
 8 files changed, 65 insertions(+), 135 deletions(-)
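
The effect of the restructuring is visible in the import lines of the
site.py hunk below: the former flat modules, previously pulled in through
a sys.path hack, become a regular `sitegen` package resolved relative to
the repository root. Side by side, both forms taken from the diff:

    # Before this commit: flat modules behind a sys.path hack.
    import inc.i18nfix as i18nfix
    from inc.time import time_rfc822, time_now, conv_date_rfc822

    # After this commit: a regular package, no path manipulation in site.py.
    import sitegen.i18nfix as i18nfix
    from sitegen.timeutil import time_rfc822, time_now, conv_date_rfc822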

diff --git a/README.text b/README
similarity index 100%
rename from README.text
rename to README
diff --git a/make_site.py b/make_site.py
new file mode 100755
index 0000000..35cc333
--- /dev/null
+++ b/make_site.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+# coding: utf-8
+#
+# This file is in the public domain.
+#
+# This script runs the jinja2 templating engine on an input template file
+# using the specified locale for gettext translations, and writes
+# the resulting (HTML) output file.
+#
+# Note that the gettext files need to be prepared first. This script
+# is thus to be invoked via the Makefile.
+import jinja2
+import sys
+from pathlib import Path, PurePath
+
+# Make sure the current directory is in the search path when trying
+# to import i18nfix.
+sys.path.insert(0, ".")
+
+from sitegen.site import SiteGenerator
+
+
+def main():
+    x = SiteGenerator()
+    x.run()
+
+if __name__ == "__main__":
+    main()
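
For context, a minimal sketch of the jinja2-plus-gettext flow the header
comment describes (the paths, domain, and locale here are illustrative,
not the actual SiteGenerator internals):

    import gettext
    import jinja2

    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader("template"),
        extensions=["jinja2.ext.i18n"],
    )
    # The compiled .mo catalogs must already exist, which is why the
    # script is meant to be invoked via the Makefile.
    tr = gettext.translation("messages", localedir="locale", languages=["de"])
    env.install_gettext_translations(tr, newstyle=True)
    page = env.get_template("index.html.j2").render()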
diff --git a/mybabel.py b/mybabel.py
deleted file mode 100644
index daeb262..0000000
--- a/mybabel.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-
-# This code is in the public domain.
-#
-# This is a wrapper around 'pybabel' that sets our include path
-# to find the 'i18nfix' module.  It takes the name of the
-# pybabel program as the first argument (must be a Python script!)
-# and passes the other arguments to pybabel after setting our
-# sys.path.
-
-import shutil
-import sys
-
-# First, extend the search path as needed (without setting PYTHONPATH!)
-sys.path.insert(0, ".")
-
-# Now, find the actual pybabel program in the $PATH
-pb = shutil.which(sys.argv[1])
-
-# Remove 'pybabel' from argv[] so that pybabel doesn't confuse
-# itself for the first command-line argument ;-)
-sys.argv.remove(sys.argv[1])
-
-# Now we can run pybabel. Yeah!
-exec(compile(source=open(pb).read(), filename=pb, mode="exec"))
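
The wrapper becomes unnecessary because i18nfix now lives inside the
sitegen package: any tool started from the repository root can import it
without a sys.path shim. A hedged sketch of an equivalent direct
invocation (the mapping file name babel.cfg is hypothetical):

    import subprocess

    # Run pybabel from the repository root so that `sitegen` is
    # importable by anything the extraction step needs to load.
    subprocess.run(
        ["pybabel", "extract", "-F", "babel.cfg", "-o", "messages.pot", "."],
        check=True,
    )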
diff --git a/sitegen/__init__.py b/sitegen/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/i18nfix.py b/sitegen/i18nfix.py
similarity index 100%
rename from i18nfix.py
rename to sitegen/i18nfix.py
diff --git a/site.py b/sitegen/site.py
similarity index 86%
rename from site.py
rename to sitegen/site.py
index c090130..5e009dc 100644
--- a/site.py
+++ b/sitegen/site.py
@@ -28,14 +28,41 @@ import jinja2
 from pathlib import Path, PurePosixPath, PurePath
 from ruamel.yaml import YAML
 from datetime import datetime
+import html.parser
+from bs4 import BeautifulSoup
+import sitegen.i18nfix as i18nfix
+from sitegen.timeutil import time_rfc822, time_now, conv_date_rfc822
 
-# Make sure the current directory is in the search path when trying
-# to import i18nfix.
-sys.path.insert(0, ".")
 
-import inc.i18nfix as i18nfix
-from inc.textproc import cut_news_text, cut_article
-from inc.time import time_rfc822, time_now, conv_date_rfc822
+def html2text(html_str):
+    class extractText(html.parser.HTMLParser):
+        def __init__(self):
+            super(extractText, self).__init__()
+            self.result = []
+
+        def handle_data(self, data):
+            self.result.append(data)
+
+        def text_in(self):
+            return "".join(self.result)
+
+    k = extractText()
+    k.feed(html_str)
+    return k.text_in()
+
+
+def cut_text(filename, count):
+    with open(filename) as html:
+        soup = BeautifulSoup(html, features="lxml")
+        for script in soup(["script", "style"]):
+            script.extract()
+        k = []
+        for i in soup.findAll("p")[1]:
+            k.append(i)
+        b = "".join(str(e) for e in k)
+        text = html2text(b.replace("\n", ""))
+        textreduced = (text[:count] + " [...]") if len(text) > count else (text)
+        return textreduced
 
 
 def make_helpers(root, in_file, locale):
@@ -87,6 +114,9 @@ def make_helpers(root, in_file, locale):
         else:
             return url(filename + "." + locale + ".svg")
 
+    def get_abstract(name, length):
+        return cut_text(root / "template" / (name + ".j2"), length)
+
     return dict(
         self_localized=self_localized,
         url_localized=url_localized,
@@ -96,6 +126,7 @@ def make_helpers(root, in_file, locale):
         svg_localized=svg_localized,
         now=time_rfc822(time_now()),
         conv_date_rfc822=conv_date_rfc822,
+        get_abstract=get_abstract,
     )
 
 
@@ -138,16 +169,6 @@ class SiteGenerator:
         if self.baseurl is None:
             self.baseurl = self.config["siteconf"].get("baseurl")
 
-    def gen_abstract(self, name, member, pages, length):
-        conf = self.config
-        for item in conf[name]:
-            item[member] = cut_news_text(item[pages], length)
-
-    def gen_newspost_content(self, name, member, pages, lang):
-        conf = self.config
-        for item in conf[name]:
-            item[member] = cut_article(item[pages], conf, lang)
-
     def run_localized(self, locale, tr):
         conf = self.config
         root = self.root
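
The new get_abstract helper replaces the removed gen_abstract and
gen_newspost_content methods with an on-demand call. A usage sketch
(the template name is hypothetical):

    # helpers as returned by make_helpers(root, in_file, locale):
    helpers = make_helpers(root, in_file, locale)

    # Reads template/news/example.j2, drops <script>/<style> tags, and
    # returns the text of the second <p> cut to 200 characters, with
    # " [...]" appended when truncated.
    abstract = helpers["get_abstract"]("news/example", 200)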
diff --git a/time.py b/sitegen/timeutil.py
similarity index 100%
rename from time.py
rename to sitegen/timeutil.py
diff --git a/textproc.py b/textproc.py
deleted file mode 100644
index e39ee12..0000000
--- a/textproc.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (C) 2019 GNUnet e.V.
-#
-# This code is derived from code contributed to GNUnet e.V.
-# by nikita <nikita@n0.is>.
-#
-# Permission to use, copy, modify, and/or distribute this software for
-# any purpose with or without fee is hereby granted.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
-# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
-# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
-# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-# SPDX-License-Identifier: 0BSD
-import html.parser
-from bs4 import BeautifulSoup
-
-
-class extractText(html.parser.HTMLParser):
-    def __init__(self):
-        super(extractText, self).__init__()
-        self.result = []
-
-    def handle_data(self, data):
-        self.result.append(data)
-
-    def text_in(self):
-        return "".join(self.result)
-
-
-def html2text(html):
-    k = extractText()
-    k.feed(html)
-    return k.text_in()
-
-
-def cut_text(filename, count):
-    with open(filename) as html:
-        soup = BeautifulSoup(html, features="lxml")
-        for script in soup(["script", "style"]):
-            script.extract()
-        k = []
-        for i in soup.findAll("p")[1]:
-            k.append(i)
-        b = "".join(str(e) for e in k)
-        text = html2text(b.replace("\n", ""))
-        textreduced = (text[:count] + " [...]") if len(text) > count else (text)
-        return textreduced
-
-
-def cut_news_text(filename, count):
-    return cut_text("template/news/" + filename + ".j2", count)
-
-
-# TODO: replace id='...' with frontier so that we can
-# pass it in cut_article reusable, or merge cut_text and
-# cut_by_frontier.
-def cut_by_frontier(filename):
-    with open(filename) as html:
-        soup = BeautifulSoup(html, features="lxml")
-        k = []
-        for i in soup.find(id="newspost-content"):
-            k.append(i)
-        b = "".join(str(e) in k)
-        text = b.replace("\n", "")
-        return text
-
-
-def cut_article(filename, conf, lang):
-    return cut_all("template/news/" + filename + ".j2", conf, lang)
-
-
-def cut_all(filename, conf, lang):
-    with open(filename) as html:
-        soup = BeautifulSoup(html, features="lxml")
-        i = (
-            repr(soup)
-            .replace('{% extends "common/news.j2" %}\n{% block body_content %}\n', "")
-            .replace("\n{% endblock body_content %}", "")
-            .replace("<html><body><p></p>", "")
-            .replace("</body></html>", "")
-        )
-        urlstr = "https://" + conf["siteconf"]["baseurl"] + "/" + lang + "/"
-        text = (
-            i.replace("\n", "")
-            .replace("{{ url_localized('", urlstr)
-            .replace("') }}", "")
-        )
-        # .replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;')
-        return text

-- 
To stop receiving notification emails like this one, please contact
gnunet@gnunet.org.


