repo_name | path | text |
|---|---|---|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/__init__.py | import sys
import os
parent_path = os.path.abspath(os.path.join(os.path.split(__file__)[0], ".."))
if not parent_path in sys.path:
sys.path.insert(0, parent_path)
del parent_path
from runtests import buildTestSuite
import support
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/transform/__init__.py | # -*- coding: utf-8 -*-
"""
Transformer sub-package for the pyRdfa package. It contains modules with transformer functions; each may be
invoked by pyRdfa to transform the DOM tree before the "real" RDFa processing.
@summary: RDFa Transformer package
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.8 2012/06/12 11:47:19 ivan Exp $
$Date: 2012/06/12 11:47:19 $
"""
__version__ = "3.0"
# Here are the transformer functions that are to be performed for all RDFa files, no matter what
def top_about(root, options, state) :
"""
@param root: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
def set_about(node) :
if has_one_of_attributes(node, "rel", "rev") :
if not has_one_of_attributes(node, "about", "src") :
node.setAttribute("about","")
else :
if not has_one_of_attributes(node, "href", "resource", "about", "src") :
node.setAttribute("about","")
from ..host import HostLanguage
from ..utils import has_one_of_attributes
if not has_one_of_attributes(root, "about") :
# The situation is a bit complicated: if a @resource is present without anything else, then it sets
# the subject, ie, should be accepted...
if has_one_of_attributes(root, "resource", "href", "src") :
if has_one_of_attributes(root, "rel", "rev","property") :
root.setAttribute("about","")
else :
root.setAttribute("about","")
if options.host_language in [ HostLanguage.xhtml, HostLanguage.html5, HostLanguage.xhtml5 ] :
if state.rdfa_version >= "1.1" :
pass
else :
for top in root.getElementsByTagName("head") :
if not has_one_of_attributes(top, "href", "resource", "about", "src") :
set_about(top)
for top in root.getElementsByTagName("body") :
if not has_one_of_attributes(top, "href", "resource", "about", "src") :
set_about(top)
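# Illustrative note (not part of the original source): under RDFa 1.0 the net effect of
# top_about is that a <head> or <body> element carrying none of @href/@resource/@about/@src
# ends up with an explicit empty subject, e.g. <head> becomes <head about="">, while
# elements that already set one of those attributes are left untouched.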
def empty_safe_curie(node, options, state) :
"""
Remove the attributes whose value is an empty safe curie. It also adds an 'artificial' flag, ie, an
attribute, onto the node to signal that there _is_ an attribute with an ignored safe curie value.
The name of that flag attribute is 'about_pruned' or 'resource_pruned'.
@param node: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
def prune_safe_curie(node,name) :
if node.hasAttribute(name) :
av = node.getAttribute(name)
if av == '[]' :
node.removeAttribute(name)
node.setAttribute(name+'_pruned','')
msg = "Attribute @%s uses an empty safe CURIE; the attribute is ignored" % name
options.add_warning(msg, node=node)
prune_safe_curie(node, "about")
prune_safe_curie(node, "resource")
for n in node.childNodes :
if n.nodeType == node.ELEMENT_NODE :
empty_safe_curie(n, options, state)
def vocab_for_role(node, options, state) :
"""
The value of the @role attribute (defined separately in the U{Role Attribute Specification Lite<http://www.w3.org/TR/role-attribute/#using-role-in-conjunction-with-rdfa>}) should be interpreted as if a @vocab
value pointing to the XHTML vocabulary had been defined for it. This method turns all terms in @role attributes into full URIs, so that
this is no longer an issue at run time.
@param node: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
from ..termorcurie import termname, XHTML_URI
def handle_role(node) :
if node.hasAttribute("role") :
old_values = node.getAttribute("role").strip().split()
new_values = ""
for val in old_values :
if termname.match(val) :
new_values += XHTML_URI + val + ' '
else :
new_values += val + ' '
node.setAttribute("role", new_values.strip())
handle_role(node)
for n in node.childNodes :
if n.nodeType == node.ELEMENT_NODE :
vocab_for_role(n, options, state)
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/sparql/aggregates.py | from rdflib import Literal, XSD
from rdflib.plugins.sparql.evalutils import _eval
from rdflib.plugins.sparql.operators import numeric
from rdflib.plugins.sparql.datatypes import type_promotion
from rdflib.plugins.sparql.compat import num_max, num_min
from decimal import Decimal
"""
Aggregation functions
"""
def _eval_rows(expr, group):
for row in group:
try:
yield _eval(expr, row)
except:
pass
def agg_Sum(a, group, bindings):
c = 0
dt = None
for x in group:
try:
e = _eval(a.vars, x)
n = numeric(e)
if dt == None:
dt = e.datatype
else:
dt = type_promotion(dt, e.datatype)
if type(c) == float and type(n) == Decimal:
c += float(n)
elif type(n) == float and type(c) == Decimal:
c = float(c) + n
else:
c += n
except:
pass # simply dont count
bindings[a.res] = Literal(c, datatype=dt)
# Perhaps TODO: keep datatype for max/min?
def agg_Min(a, group, bindings):
m = None
for x in group:
try:
v = numeric(_eval(a.vars, x))
if m is None:
m = v
else:
m = num_min(v, m)
except:
return # error in aggregate => no binding
if m is not None:
bindings[a.res] = Literal(m)
def agg_Max(a, group, bindings):
m = None
for x in group:
try:
v = numeric(_eval(a.vars, x))
if m is None:
m = v
else:
m = num_max(v, m)
except:
return # error in aggregate => no binding
if m is not None:
bindings[a.res] = Literal(m)
def agg_Count(a, group, bindings):
c = 0
for x in group:
try:
if a.vars != '*':
_eval(a.vars, x)
c += 1
except:
return # error in aggregate => no binding
# pass # simply dont count
bindings[a.res] = Literal(c)
def agg_Sample(a, group, bindings):
try:
bindings[a.res] = _eval(a.vars, iter(group).next())
except StopIteration:
pass # no res
def agg_GroupConcat(a, group, bindings):
sep = a.separator or " "
bindings[a.res] = Literal(
sep.join(unicode(x) for x in _eval_rows(a.vars, group)))
def agg_Avg(a, group, bindings):
c = 0
s = 0
dt = None
for x in group:
try:
e = _eval(a.vars, x)
n = numeric(e)
if dt == None:
dt = e.datatype
else:
dt = type_promotion(dt, e.datatype)
if type(s) == float and type(n) == Decimal:
s += float(n)
elif type(n) == float and type(s) == Decimal:
s = float(s) + n
else:
s += n
c += 1
except:
return # error in aggregate => no binding
if c == 0:
bindings[a.res] = Literal(0)
return # avoid dividing by zero for an empty group
if dt == XSD.float or dt == XSD.double:
bindings[a.res] = Literal(s / c)
else:
bindings[a.res] = Literal(Decimal(s) / Decimal(c))
def evalAgg(a, group, bindings):
if a.name == 'Aggregate_Count':
return agg_Count(a, group, bindings)
elif a.name == 'Aggregate_Sum':
return agg_Sum(a, group, bindings)
elif a.name == 'Aggregate_Sample':
return agg_Sample(a, group, bindings)
elif a.name == 'Aggregate_GroupConcat':
return agg_GroupConcat(a, group, bindings)
elif a.name == 'Aggregate_Avg':
return agg_Avg(a, group, bindings)
elif a.name == 'Aggregate_Min':
return agg_Min(a, group, bindings)
elif a.name == 'Aggregate_Max':
return agg_Max(a, group, bindings)
else:
raise Exception("Unknown aggregate function " + a.name)
|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/test_whitespace_filter.py | import unittest
from html5lib.filters.whitespace import Filter
from html5lib.constants import spaceCharacters
spaceCharacters = u"".join(spaceCharacters)
class TestCase(unittest.TestCase):
def runTest(self, input, expected):
output = list(Filter(input))
errorMsg = "\n".join(["\n\nInput:", str(input),
"\nExpected:", str(expected),
"\nReceived:", str(output)])
self.assertEquals(output, expected, errorMsg)
def runTestUnmodifiedOutput(self, input):
self.runTest(input, input)
def testPhrasingElements(self):
self.runTestUnmodifiedOutput(
[{"type": u"Characters", "data": u"This is a " },
{"type": u"StartTag", "name": u"span", "data": [] },
{"type": u"Characters", "data": u"phrase" },
{"type": u"EndTag", "name": u"span", "data": []},
{"type": u"SpaceCharacters", "data": u" " },
{"type": u"Characters", "data": u"with" },
{"type": u"SpaceCharacters", "data": u" " },
{"type": u"StartTag", "name": u"em", "data": [] },
{"type": u"Characters", "data": u"emphasised text" },
{"type": u"EndTag", "name": u"em", "data": []},
{"type": u"Characters", "data": u" and an " },
{"type": u"StartTag", "name": u"img", "data": [[u"alt", u"image"]] },
{"type": u"Characters", "data": u"." }])
def testLeadingWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"SpaceCharacters", "data": u" "},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testLeadingWhitespaceAsCharacters(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": spaceCharacters + u"foo"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u" foo"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testTrailingWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": u" "},
{"type": u"EndTag", "name": u"p", "data": []}])
def testTrailingWhitespaceAsCharacters(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo "},
{"type": u"EndTag", "name": u"p", "data": []}])
def testWhitespace(self):
self.runTest(
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"},
{"type": u"EndTag", "name": u"p", "data": []}],
[{"type": u"StartTag", "name": u"p", "data": []},
{"type": u"Characters", "data": u"foo bar"},
{"type": u"EndTag", "name": u"p", "data": []}])
def testLeadingWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"Characters", "data": u"foo"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testLeadingWhitespaceAsCharactersInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": spaceCharacters + u"foo"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testTrailingWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo"},
{"type": u"SpaceCharacters", "data": spaceCharacters},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testTrailingWhitespaceAsCharactersInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters},
{"type": u"EndTag", "name": u"pre", "data": []}])
def testWhitespaceInPre(self):
self.runTestUnmodifiedOutput(
[{"type": u"StartTag", "name": u"pre", "data": []},
{"type": u"Characters", "data": u"foo" + spaceCharacters + "bar"},
{"type": u"EndTag", "name": u"pre", "data": []}])
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
|
DmPo/Schemaorg_CivicOS | lib/html5lib/__init__.py | """
HTML parsing library based on the WHATWG "HTML5"
specification. The parser is designed to be compatible with existing
HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage:
import html5lib
f = open("my_document.html")
tree = html5lib.parse(f)
"""
__version__ = "0.95-dev"
from html5parser import HTMLParser, parse, parseFragment
from treebuilders import getTreeBuilder
from treewalkers import getTreeWalker
from serializer import serialize
|
DmPo/Schemaorg_CivicOS | lib/isodate/tzinfo.py | '''
This module provides some datetime.tzinfo implementations.
All those classes are taken from the Python documentation.
'''
from datetime import timedelta, tzinfo
import time
ZERO = timedelta(0)
# constant for zero time offset.
class Utc(tzinfo):
'''UTC
Universal time coordinated time zone.
'''
def utcoffset(self, dt):
'''
Return offset from UTC in minutes east of UTC, which is ZERO for UTC.
'''
return ZERO
def tzname(self, dt):
'''
Return the time zone name corresponding to the datetime object dt, as a string.
'''
return "UTC"
def dst(self, dt):
'''
Return the daylight saving time (DST) adjustment, in minutes east of UTC.
'''
return ZERO
UTC = Utc()
# the default instance for UTC.
class FixedOffset(tzinfo):
'''
A class building tzinfo objects for fixed-offset time zones.
Note that FixedOffset(0, 0, "UTC") is a different way to build a
UTC tzinfo object.
'''
def __init__(self, offset_hours, offset_minutes, name):
'''
Initialise an instance with time offset and name.
The time offset should be positive for time zones east of UTC
and negative for time zones west of UTC.
'''
self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
self.__name = name
def utcoffset(self, dt):
'''
Return offset from UTC in minutes east of UTC.
'''
return self.__offset
def tzname(self, dt):
'''
Return the time zone name corresponding to the datetime object dt, as a
string.
'''
return self.__name
def dst(self, dt):
'''
Return the daylight saving time (DST) adjustment, in minutes east of
UTC.
'''
return ZERO
def __repr__(self):
'''
Return nicely formatted repr string.
'''
return "<FixedOffset %r>" % self.__name
STDOFFSET = timedelta(seconds = -time.timezone)
# local time zone offset
# calculate local daylight saving offset if any.
if time.daylight:
DSTOFFSET = timedelta(seconds = -time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
# difference between local time zone and local DST time zone
class LocalTimezone(tzinfo):
"""
A class capturing the platform's idea of local time.
"""
def utcoffset(self, dt):
'''
Return offset from UTC in minutes east of UTC.
'''
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
'''
Return daylight saving offset.
'''
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
'''
Return the time zone name corresponding to the datetime object dt, as a
string.
'''
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
'''
Returns true if DST is active for given datetime object dt.
'''
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCAL = LocalTimezone()
# the default instance for local time zone.
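if __name__ == '__main__':
    # Small usage sketch (not part of the original module): attach the tzinfo
    # implementations above to datetime objects and inspect the resulting offsets.
    from datetime import datetime
    aware = datetime(2012, 6, 12, 11, 47, 19, tzinfo=UTC)
    print(aware.isoformat())                      # 2012-06-12T11:47:19+00:00
    cet = FixedOffset(1, 0, "CET")
    print(aware.astimezone(cet).isoformat())      # 2012-06-12T12:47:19+01:00
    print(datetime.now(LOCAL).tzname())           # platform-dependent local zone name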
|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/test_encoding.py | import os
import unittest
from support import html5lib_test_files, TestData, test_dir
from html5lib import HTMLParser, inputstream
import re
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name(self):
self.assertEquals(inputstream.codecName("utf-8"), "utf-8")
self.assertEquals(inputstream.codecName("utf8"), "utf-8")
self.assertEquals(inputstream.codecName(" utf8 "), "utf-8")
self.assertEquals(inputstream.codecName("ISO_8859--1"), "windows-1252")
def buildTestSuite():
for filename in html5lib_test_files("encoding"):
test_name = os.path.basename(filename).replace('.dat',''). \
replace('-','')
tests = TestData(filename, "data")
for idx, test in enumerate(tests):
def encodingTest(self, data=test['data'],
encoding=test['encoding']):
p = HTMLParser()
t = p.parse(data, useChardet=False)
errorMessage = ("Input:\n%s\nExpected:\n%s\nRecieved\n%s\n"%
(data, repr(encoding.lower()),
repr(p.tokenizer.stream.charEncoding)))
self.assertEquals(encoding.lower(),
p.tokenizer.stream.charEncoding[0],
errorMessage)
setattr(Html5EncodingTestCase, 'test_%s_%d' % (test_name, idx+1),
encodingTest)
try:
import chardet
def test_chardet(self):
data = open(os.path.join(test_dir, "encoding" , "chardet", "test_big5.txt")).read()
encoding = inputstream.HTMLInputStream(data).charEncoding
assert encoding[0].lower() == "big5"
setattr(Html5EncodingTestCase, 'test_chardet', test_chardet)
except ImportError:
print "chardet not found, skipping chardet tests"
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
|
DmPo/Schemaorg_CivicOS | lib/rdflib_jsonld/util.py | try:
import json
assert json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
from rdflib.py3compat import PY3
from os import sep
from os.path import normpath
if PY3:
from urllib.parse import urljoin, urlsplit, urlunsplit
else:
from urlparse import urljoin, urlsplit, urlunsplit
from rdflib.parser import create_input_source
if PY3:
from io import StringIO
def source_to_json(source):
# TODO: conneg for JSON (fix support in rdflib's URLInputSource!)
source = create_input_source(source, format='json-ld')
stream = source.getByteStream()
try:
if PY3:
return json.load(StringIO(stream.read().decode('utf-8')))
else:
return json.load(stream)
finally:
stream.close()
VOCAB_DELIMS = ('#', '/', ':')
def split_iri(iri):
for delim in VOCAB_DELIMS:
at = iri.rfind(delim)
if at > -1:
return iri[:at+1], iri[at+1:]
return iri, None
def norm_url(base, url):
url = urljoin(base, url)
parts = urlsplit(url)
path = normpath(parts[2])
if sep != '/':
path = '/'.join(path.split(sep))
if parts[2].endswith('/') and not path.endswith('/'):
path += '/'
return urlunsplit(parts[0:2] + (path,) + parts[3:])
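if __name__ == '__main__':
    # Small usage sketch (not part of the original module).
    print(split_iri('http://schema.org/Person'))        # ('http://schema.org/', 'Person')
    print(split_iri('urn:isbn:0451450523'))             # ('urn:isbn:', '0451450523')
    print(norm_url('http://example.org/a/b/', '../c'))  # http://example.org/a/c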
|
DmPo/Schemaorg_CivicOS | api.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
import os
import os.path
import glob
import re
import threading
import parsers
import datetime, time
from google.appengine.ext import ndb
loader_instance = False
import apirdflib
#from apirdflib import rdfGetTargets, rdfGetSources
from apimarkdown import Markdown
def getInstanceId(short=False):
ret = ""
if "INSTANCE_ID" in os.environ:
ret = os.environ["INSTANCE_ID"]
if short:
ret = ret[len(ret)-6:]
return ret
schemasInitialized = False
extensionsLoaded = False
extensionLoadErrors = ""
#INTESTHARNESS is used to flag that we are in a test harness - not called by the webApp, so some things will work differently!
#setInTestHarness(True) should be called from test suites.
INTESTHARNESS = False
def setInTestHarness(val):
global INTESTHARNESS
INTESTHARNESS = val
def getInTestHarness():
global INTESTHARNESS
return INTESTHARNESS
if not getInTestHarness():
from google.appengine.api import memcache
AllLayersList = []
def setAllLayersList(val):
global AllLayersList
AllLayersList = val
#Copy it into apirdflib
apirdflib.allLayersList = val
def getAllLayersList():
global AllLayersList
return AllLayersList
EVERYLAYER = "!EVERYLAYER!"
sitename = "schema.org"
sitemode = "mainsite" # whitespaced list for CSS tags,
# e.g. "mainsite testsite", "extensionsite" when off expected domains
DYNALOAD = True # permits read_schemas to be re-invoked live.
#JINJA_ENVIRONMENT = jinja2.Environment(
# loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
# extensions=['jinja2.ext.autoescape'], autoescape=True)
NDBPAGESTORE = True #True - uses NDB shared (across instances) store for page cache - False uses in-memory local cache
debugging = False
def getMasterStore():
return apirdflib.STORE
def getQueryGraph():
return apirdflib.queryGraph()
# Core API: we have a single schema graph built from triples and units.
NodeIDMap = {}
ext_re = re.compile(r'([^\w,])+')
all_layers = {}
all_terms = {}
# Utility declaration of W3C Initial Context
# From http://www.w3.org/2011/rdfa-context/rdfa-1.1
# and http://www.w3.org/2013/json-ld-context/rdfa11
# Enables all these prefixes without explicit declaration when
# using schema.org's JSON-LD context file.
#
namespaces = """ "schema": "http://schema.org/",
"cat": "http://www.w3.org/ns/dcat#",
"cc": "http://creativecommons.org/ns#",
"cnt": "http://www.w3.org/2008/content#",
"ctag": "http://commontag.org/ns#",
"dc": "http://purl.org/dc/terms/",
"dcat": "http://www.w3.org/ns/dcat#",
"dcterms": "http://purl.org/dc/terms/",
"describedby": "http://www.w3.org/2007/05/powder-s#describedby",
"earl": "http://www.w3.org/ns/earl#",
"foaf": "http://xmlns.com/foaf/0.1/",
"gldp": "http://www.w3.org/ns/people#",
"gr": "http://purl.org/goodrelations/v1#",
"grddl": "http://www.w3.org/2003/g/data-view#",
"ht": "http://www.w3.org/2006/http#",
"ical": "http://www.w3.org/2002/12/cal/icaltzd#",
"license": "http://www.w3.org/1999/xhtml/vocab#license",
"ma": "http://www.w3.org/ns/ma-ont#",
"og": "http://ogp.me/ns#",
"org": "http://www.w3.org/ns/org#",
"org": "http://www.w3.org/ns/org#",
"owl": "http://www.w3.org/2002/07/owl#",
"prov": "http://www.w3.org/ns/prov#",
"ptr": "http://www.w3.org/2009/pointers#",
"qb": "http://purl.org/linked-data/cube#",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfa": "http://www.w3.org/ns/rdfa#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"rev": "http://purl.org/stuff/rev#",
"rif": "http://www.w3.org/2007/rif#",
"role": "http://www.w3.org/1999/xhtml/vocab#role",
"rr": "http://www.w3.org/ns/r2rml#",
"sd": "http://www.w3.org/ns/sparql-service-description#",
"sioc": "http://rdfs.org/sioc/ns#",
"skos": "http://www.w3.org/2004/02/skos/core#",
"skosxl": "http://www.w3.org/2008/05/skos-xl#",
"v": "http://rdf.data-vocabulary.org/#",
"vcard": "http://www.w3.org/2006/vcard/ns#",
"void": "http://rdfs.org/ns/void#",
"wdr": "http://www.w3.org/2007/05/powder#",
"wdrs": "http://www.w3.org/2007/05/powder-s#",
"xhv": "http://www.w3.org/1999/xhtml/vocab#",
"xml": "http://www.w3.org/XML/1998/namespace",
"xsd": "http://www.w3.org/2001/XMLSchema#",
"""
class DataCacheTool():
def __init__ (self):
self.tlocal = threading.local()
self.tlocal.CurrentDataCache = "core"
self.initialise()
def initialise(self):
self._DataCache = {}
self._DataCache[self.tlocal.CurrentDataCache] = {}
return
def getCache(self,cache=None):
if cache == None:
cache = self.getCurrent()
if cache in self._DataCache.keys():
return self._DataCache[cache]
else:
self._DataCache[cache] = {}
return self._DataCache[cache]
def get(self,key,cache=None):
return self.getCache(cache).get(key)
def remove(self,key,cache=None):
return self.getCache(cache).pop(key,None)
def put(self,key,val,cache=None):
self.getCache(cache)[key] = val
def setCurrent(self,current):
self.tlocal.CurrentDataCache = current
if(self._DataCache.get(current) == None):
self._DataCache[current] = {}
log.debug("[%s] Setting _CurrentDataCache: %s" % (getInstanceId(short=True),current))
def getCurrent(self):
return self.tlocal.CurrentDataCache
def keys(self):
return self._DataCache.keys()
DataCache = DataCacheTool()
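# Illustrative usage sketch (not part of the original file): DataCache keeps one dictionary
# per named cache and selects the "current" one per thread, e.g.
#
#   DataCache.setCurrent("core")
#   DataCache.put("AllTypes:core", results)
#   DataCache.get("AllTypes:core")                   # -> results, or None on a miss
#   DataCache.get("AllTypes:core", cache="bib")      # read from another named cache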
class PageEntity(ndb.Model):
content = ndb.TextProperty()
class PageStoreTool():
def __init__ (self):
self.tlocal = threading.local()
self.tlocal.CurrentStoreSet = "core"
def initialise(self):
import time
log.info("[%s]PageStore initialising Data Store" % (getInstanceId(short=True)))
loops = 0
ret = 0
while loops < 10:
keys = PageEntity.query().fetch(keys_only=True)
count = len(keys)
if count == 0:
break
log.info("[%s]PageStore deleting %s keys" % (getInstanceId(short=True), count))
ndb.delete_multi(keys,use_memcache=False,use_cache=False)
ret += count
loops += 1
time.sleep(0.01)
return {"PageStore":ret}
def getCurrent(self):
return self.tlocal.CurrentStoreSet
def setCurrent(self,current):
self.tlocal.CurrentStoreSet = current
log.debug("PageStore setting CurrentStoreSet: %s",current)
def put(self, key, val,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
#log.info("[%s]PageStore storing %s" % (getInstanceId(),fullKey))
ent = PageEntity(id = fullKey, content = val)
ent.put()
def get(self, key,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
ent = PageEntity.get_by_id(fullKey)
if(ent):
#log.info("[%s]PageStore returning %s" % (os.environ["INSTANCE_ID"],fullKey))
return ent.content
else:
#log.info("PageStore '%s' not found" % fullKey)
return None
def remove(self, key,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
ent = PageEntity.get_by_id(fullKey)
if(ent):
return ent.key.delete()
else:
#log.info("PageStore '%s' not found" % fullKey)
return None
class HeaderEntity(ndb.Model):
content = ndb.PickleProperty()
class HeaderStoreTool():
def __init__ (self):
self.tlocal = threading.local()
self.tlocal.CurrentStoreSet = "core"
def initialise(self):
import time
log.info("[%s]HeaderStore initialising Data Store" % (getInstanceId(short=True)))
loops = 0
ret = 0
while loops < 10:
keys = HeaderEntity.query().fetch(keys_only=True)
count = len(keys)
if count == 0:
break
log.info("[%s]HeaderStore deleting %s keys" % (getInstanceId(short=True), count))
ndb.delete_multi(keys,use_memcache=False,use_cache=False)
ret += count
loops += 1
time.sleep(0.01)
return {"HeaderStore":ret}
def getCurrent(self):
return self.tlocal.CurrentStoreSet
def setCurrent(self,current):
self.tlocal.CurrentStoreSet = current
log.debug("HeaderStore setting CurrentStoreSet: %s",current)
def put(self, key, val,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
ent = HeaderEntity(id = fullKey, content = val)
ent.put()
def get(self, key,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
ent = HeaderEntity.get_by_id(fullKey)
if(ent):
return ent.content
else:
return None
def remove(self, key,cache=None):
ca = self.getCurrent()
if cache != None:
ca = cache
fullKey = ca + ":" + key
ent = HeaderEntity.get_by_id(fullKey)
if(ent):
return ent.key.delete()
else:
return None
PageStore = None
HeaderStore = None
log.info("[%s] NDB PageStore & HeaderStore available: %s" % (getInstanceId(short=True),NDBPAGESTORE))
def enablePageStore(state):
global PageStore,HeaderStore
if state:
log.info("[%s] Enabling NDB" % getInstanceId(short=True))
PageStore = PageStoreTool()
log.info("[%s] Created PageStore" % getInstanceId(short=True))
HeaderStore = HeaderStoreTool()
log.info("[%s] Created HeaderStore" % getInstanceId(short=True))
else:
log.info("[%s] Disabling NDB" % getInstanceId(short=True))
PageStore = DataCacheTool()
HeaderStore = DataCacheTool()
if NDBPAGESTORE:
enablePageStore(True)
else:
enablePageStore(False)
class Unit ():
"""
Unit represents a node in our schema graph. IDs are local,
e.g. "Person" or use simple prefixes, e.g. rdfs:Class.
"""
def __init__ (self, id):
self.id = id
NodeIDMap[id] = self
self.arcsIn = []
self.arcsOut = []
self.examples = None
self.home = None
self.subtypes = None
self.sourced = False
self.category = " "
self.typeFlags = {}
def __str__(self):
return self.id
def GetImmediateSubtypes(self, layers='core'):
return GetImmediateSubtypes(self, layers=layers)
@staticmethod
def GetUnit (id, createp=False):
"""Return a Unit representing a node in the schema graph.
Argument:
createp -- should we create node if we don't find it? (default: False)
"""
ret = None
if (id in NodeIDMap):
return NodeIDMap[id]
ret = apirdflib.rdfGetTriples(id)
if (ret == None and createp != False):
return Unit(id)
return ret
@staticmethod
def GetUnitNoLoad(id, createp=False):
if (id in NodeIDMap):
return NodeIDMap[id]
if (createp != False):
return Unit(id)
return None
def typeOf(self, type, layers='core'):
"""Boolean, true if the unit has an rdf:type matching this type."""
types = GetTargets( Unit.GetUnit("rdf:type"), self, layers )
return (type in types)
# Function needs rewriting to use GetTargets(arc,src,layers) and recurse
def subClassOf(self, type, layers='core'):
"""Boolean, true if the unit has an rdfs:subClassOf matching this type, direct or implied (in specified layer(s))."""
if (self.id == type.id):
return True
parents = GetTargets( Unit.GetUnit("rdfs:subClassOf"), self, layers )
if type in parents:
return True
else:
for p in parents:
if p.subClassOf(type, layers):
return True
return False
def directInstanceOf(self, type, layers='core'):
"""Boolean, true if the unit has a direct typeOf (aka rdf:type) property matching this type, direct or implied (in specified layer(s))."""
mytypes = GetTargets( Unit.GetUnit("rdf:type"), self, layers )
if type in mytypes:
return True
return False # TODO: consider an API for implied types too?
def isClass(self, layers='core'):
"""Does this unit represent a class/type?"""
if self.typeFlags.has_key('c'):
return self.typeFlags['c']
isClass = self.typeOf(Unit.GetUnit("rdfs:Class"), layers=EVERYLAYER)
self.typeFlags['c'] = isClass
return isClass
def isAttribute(self, layers='core'):
"""Does this unit represent an attribute/property?"""
if self.typeFlags.has_key('p'):
return self.typeFlags['p']
isProp = self.typeOf(Unit.GetUnit("rdf:Property"), layers=EVERYLAYER)
self.typeFlags['p'] = isProp
return isProp
def isEnumeration(self, layers='core'):
"""Does this unit represent an enumerated type?"""
if self.typeFlags.has_key('e'):
return self.typeFlags['e']
isE = self.subClassOf(Unit.GetUnit("Enumeration"), layers=EVERYLAYER)
self.typeFlags['e'] = isE
return isE
def isEnumerationValue(self, layers='core'):
"""Does this unit represent a member of an enumerated type?"""
if self.typeFlags.has_key('ev'):
return self.typeFlags['ev']
types = GetTargets(Unit.GetUnit("rdf:type"), self , layers=EVERYLAYER)
#log.debug("isEnumerationValue() called on %s, found %s types. layers: %s" % (self.id, str( len( types ) ), layers ) )
found_enum = False
for t in types:
if t.subClassOf(Unit.GetUnit("Enumeration"), layers=EVERYLAYER):
found_enum = True
break
self.typeFlags['ev'] = found_enum
return found_enum
def isDataType(self, layers='core'):
"""
Does this unit represent a DataType type or sub-type?
DataType and its children do not descend from Thing, so we need to
treat it specially.
"""
if self.typeFlags.has_key('d'):
return self.typeFlags['d']
ret = False
if (self.directInstanceOf(Unit.GetUnit("DataType"), layers=layers) or
self.id == "DataType"):
ret = True
else:
subs = GetTargets(Unit.GetUnit("rdfs:subClassOf"), self, layers=layers)
for p in subs:
if p.isDataType(layers=layers):
ret = True
break
self.typeFlags['d'] = ret
return ret
@staticmethod
def storePrefix(prefix):
"""Stores the prefix declaration for a given class or property"""
# Currently defined just to let the tests pass
pass
# e.g. <http://schema.org/actors> <http://schema.org/supersededBy> <http://schema.org/actor> .
def superseded(self, layers='core'):
"""Has this property been superseded? (i.e. deprecated/archaic), in any of these layers."""
supersededBy_values = GetTargets( Unit.GetUnit("supersededBy"), self, layers )
return ( len(supersededBy_values) > 0)
def supersedes(self, layers='core'):
"""Returns a property (assume max 1) that is supersededBy this one, or nothing."""
olderterms = GetSources( Unit.GetUnit("supersededBy"), self, layers )
if len(olderterms) > 0:
return olderterms[0]
else:
return None
def supersedes_all(self, layers='core'):
"""Returns terms that is supersededBy by this later one, or nothing. (in this layer)"""
return(GetSources( Unit.GetUnit("supersededBy"), self, layers ))
# so we want sources of arcs pointing here with 'supersededBy'
# e.g. vendor supersededBy seller ; returns newer 'seller' for earlier 'vendor'.
def supersededBy(self, layers='core'):
"""Returns a property (assume max 1) that supersededs this one, or nothing."""
newerterms = GetTargets( Unit.GetUnit("supersededBy"), self, layers )
if len(newerterms)>0:
return newerterms.pop()
else:
return None
def category(self):
return self.category
def getHomeLayer(self,defaultToCore=False):
ret = self.home
if ret == None:
if defaultToCore:
ret = 'core'
else:
log.info("WARNING %s has no home extension defined!!" % self.id)
ret = ""
return ret
def superproperties(self, layers='core'):
"""Returns super-properties of this one."""
if not self.isAttribute(layers=layers):
logging.debug("Non-property %s won't have subproperties." % self.id)
return None
superprops = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers )
return superprops
def subproperties(self, layers='core'):
"""Returns direct subproperties of this property."""
if not self.isAttribute(layers=layers):
logging.debug("Non-property %s won't have subproperties." % self.id)
return None
subprops = GetSources(Unit.GetUnit("rdfs:subPropertyOf"),self, layers=layers )
return subprops
def inverseproperty(self, layers="core"):
"""A property that is an inverseOf this one, e.g. alumni vs alumniOf."""
a = GetTargets(Unit.GetUnit("inverseOf"), self, layers=layers)
b = GetSources(Unit.GetUnit("inverseOf"), self, layers=layers)
if len(a)>0:
return a.pop()
else:
if len(b) > 0:
return b.pop()
else:
return None
for triple in self.arcsOut:
if (triple.target != None and triple.arc.id == "inverseOf"):
return triple.target
for triple in self.arcsIn:
if (triple.source != None and triple.arc.id == "inverseOf"):
return triple.source
return None
def UsageStr (self) :
str = GetUsage(self.id)
if (str == '1') :
return "Between 10 and 100 domains"
elif (str == '2'):
return "Between 100 and 1000 domains"
elif (str == '3'):
return "Between 1000 and 10,000 domains"
elif (str == '4'):
return "Between 10,000 and 50,000 domains"
elif (str == '5'):
return "Between 50,000 and 100,000 domains"
elif (str == '7'):
return "Between 100,000 and 250,000 domains"
elif (str == '8'):
return "Between 250,000 and 500,000 domains"
elif (str == '9'):
return "Between 500,000 and 1,000,000 domains"
elif (str == '10'):
return "Over 1,000,000 domains"
else:
return "Fewer than 10 domains"
# NOTE: each Triple is in exactly one layer, by default 'core'. When we
# read_schemas() from data/ext/{x}/*.rdfa each schema triple is given a
# layer named "x". Access to triples can default to layer="core" or take
# a custom layer or layers, e.g. layers="bib", or layers=["bib", "foo"].
# This is verbose but at least explicit. If we move towards making better
# use of external templates for site generation we could reorganize.
# For now e.g. 'grep GetSources api.py| grep -v layer' and
# 'grep GetTargets api.py| grep -v layer' etc. can check for non-layered usage.
#
# Units, on the other hand, are layer-independent. For now we have only a
# crude inLayer(layerlist, unit) API to check which layers mention a term.
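# Illustrative sketch (not part of the original file): the layer selection threads through
# the triple-access helpers defined below, e.g.
#
#   node = Unit.GetUnit("Thing")
#   GetTargets(Unit.GetUnit("rdfs:comment"), node, layers="core")
#   GetTargets(Unit.GetUnit("rdfs:comment"), node, layers=["core", "bib"])
#
# only returns values asserted in the named layer(s), while layers=EVERYLAYER disables the
# layer filter entirely.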
class Triple ():
"""Triple represents an edge in the graph: source, arc and target/text."""
def __init__ (self, source, arc, target, text, layer='core'):
"""Triple constructor keeps state via source node's arcsOut."""
self.source = source
source.arcsOut.append(self)
self.arc = arc
self.layer = layer
self.id = self
if (target != None):
self.target = target
self.text = None
target.arcsIn.append(self)
elif (text != None):
self.text = text
self.target = None
def __str__ (self):
ret = ""
if self.source != None:
ret += "%s " % self.source
if self.target != None:
ret += "%s " % self.target
if self.arc != None:
ret += "%s " % self.arc
return ret
@staticmethod
def AddTriple(source, arc, target, layer='core'):
"""AddTriple stores a thing-valued new Triple within source Unit."""
if (source == None or arc == None or target == None):
log.info("Bailing")
return
else:
# for any term mentioned as subject or object, we register the layer
# TODO: make this into a function
x = all_terms.get(source.id) # subjects
if x is None:
x = []
if layer not in x:
x.append(layer)
all_terms[source.id]= x
x = all_terms.get(target.id) # objects
if x is None:
x = []
if layer not in x:
x.append(layer)
all_terms[target.id]= x
return Triple(source, arc, target, None, layer)
@staticmethod
def AddTripleText(source, arc, text, layer='core'):
"""AddTriple stores a string-valued new Triple within source Unit."""
if (source == None or arc == None or text == None):
return
else:
return Triple(source, arc, None, text, layer)
def GetTargets(arc, source, layers='core'):
"""All values for a specified arc on specified graph node (within any of the specified layers)."""
# log.debug("GetTargets checking in layer: %s for unit: %s arc: %s" % (layers, source.id, arc.id))
targets = {}
fred = False
try:
for triple in source.arcsOut:
if (triple.arc == arc):
if (triple.target != None and (layers == EVERYLAYER or triple.layer in layers)):
targets[triple.target] = 1
elif (triple.text != None and (layers == EVERYLAYER or triple.layer in layers)):
targets[triple.text] = 1
return targets.keys()
except Exception as e:
log.debug("GetTargets caught exception %s" % e)
return []
def GetSources(arc, target, layers='core'):
"""All source nodes for a specified arc pointing to a specified node (within any of the specified layers)."""
#log.debug("GetSources checking in layer: %s for unit: %s arc: %s" % (layers, target.id, arc.id))
if(target.sourced == False):
apirdflib.rdfGetSourceTriples(target)
sources = {}
for triple in target.arcsIn:
if (triple.arc == arc and (layers == EVERYLAYER or triple.layer in layers)):
sources[triple.source] = 1
return sources.keys()
def GetArcsIn(target, layers='core'):
"""All incoming arc types for this specified node (within any of the specified layers)."""
arcs = {}
for triple in target.arcsIn:
if (layers == EVERYLAYER or triple.layer in layers):
arcs[triple.arc] = 1
return arcs.keys()
def GetArcsOut(source, layers='core'):
"""All outgoing arc types for this specified node."""
arcs = {}
for triple in source.arcsOut:
if (layers == EVERYLAYER or triple.layer in layers):
arcs[triple.arc] = 1
return arcs.keys()
# Utility API
def GetComment(node, layers='core') :
"""Get the first rdfs:comment we find on this node (or "No comment"), within any of the specified layers."""
tx = GetComments(node, layers)
if len(tx) > 0:
return Markdown.parse(tx[0])
else:
return "No comment"
def GetComments(node, layers='core') :
"""Get the rdfs:comment(s) we find on this node within any of the specified layers."""
return GetTargets(Unit.GetUnit("rdfs:comment", True), node, layers=layers )
def GetsoftwareVersions(node, layers='core') :
"""Get the schema:softwareVersion(s) we find on this node (or [] ), within any of the specified layers."""
return GetTargets(Unit.GetUnit("softwareVersion", True), node, layers=layers )
def GetImmediateSubtypes(n, layers='core'):
"""Get this type's immediate subtypes, i.e. that are subClassOf this."""
if n==None:
return None
subs = GetSources( Unit.GetUnit("rdfs:subClassOf", True), n, layers=layers)
if (n.isDataType() or n.id == "DataType"):
subs += GetSources( Unit.GetUnit("rdf:type", True), n, layers=layers)
subs.sort(key=lambda x: x.id)
return subs
def GetImmediateSupertypes(n, layers='core'):
"""Get this type's immediate supertypes, i.e. that we are subClassOf."""
if n==None:
return None
sups = GetTargets( Unit.GetUnit("rdfs:subClassOf", True), n, layers=layers)
if (n.isDataType() or n.id == "DataType"):
sups += GetTargets( Unit.GetUnit("rdf:type", True), n, layers=layers)
sups.sort(key=lambda x: x.id)
return sups
Utc = "util_cache"
def GetAllTypes(layers='core'):
global Utc
"""Return all types in the graph."""
KEY = "AllTypes:%s" % layers
if DataCache.get(KEY+'x',Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing", True)
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
if inLayer(layers,current):
subbed[current] = 1
for sc in subs:
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,subbed.keys(),Utc)
return subbed.keys()
def GetAllDataTypes(layers='core'):
global Utc
"""Return all types in the graph."""
KEY = "AllDataTypes:%s" % layers
if DataCache.get(KEY+'x',Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("DataType", True)
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
if inLayer(layers,current):
subbed[current] = 1
for sc in subs:
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,subbed.keys(),Utc)
return subbed.keys()
def GetAllEnumerationValues(layers='core'):
global Utc
KEY = "AllEnums:%s" % layers
if DataCache.get(KEY,Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Enumeration", True)
enums = {}
subbed = {}
todo = [mynode]
while todo:
current = todo.pop()
subs = GetImmediateSubtypes(current, EVERYLAYER)
subbed[current] = 1
for sc in subs:
vals = GetSources( Unit.GetUnit("rdf:type", True), sc, layers=EVERYLAYER)
for val in vals:
if inLayer(layers,val):
enums[val] = 1
if subbed.get(sc.id) == None:
todo.append(sc)
DataCache.put(KEY,enums.keys(),Utc)
return enums.keys()
def GetAllProperties(layers='core'):
"""Return all properties in the graph."""
global Utc
KEY = "AllProperties:%s" % layers
if DataCache.get(KEY,Utc):
#logging.debug("DataCache HIT: %s" % KEY)
return DataCache.get(KEY,Utc)
else:
#logging.debug("DataCache MISS: %s" % KEY)
mynode = Unit.GetUnit("Thing")
props = GetSources(Unit.GetUnit("rdf:type", True), Unit.GetUnit("rdf:Property", True), layers=EVERYLAYER)
res = []
for prop in props:
if inLayer(layers,prop):
res.append(prop)
sorted_all_properties = sorted(res, key=lambda u: u.id)
DataCache.put(KEY,sorted_all_properties,Utc)
return sorted_all_properties
def GetAllTerms(layers='core',includeDataTypes=False):
ret = GetAllTypes(layers)
ret.extend(GetAllEnumerationValues(layers))
ret.extend(GetAllProperties(layers))
if includeDataTypes:
ret.extend(GetAllDataTypes(layers))
return sorted(ret,key=lambda u: u.id)
def GetParentList(start_unit, end_unit=None, path=[], layers='core'):
"""
Returns one or more lists, each giving a path from a start unit to a supertype parent unit.
example:
for path in GetParentList( Unit.GetUnit("Restaurant") ):
pprint.pprint(', '.join([str(x.id) for x in path ]))
'Restaurant, FoodEstablishment, LocalBusiness, Organization, Thing'
'Restaurant, FoodEstablishment, LocalBusiness, Place, Thing'
"""
if not end_unit:
end_unit = Unit.GetUnit("Thing")
arc=Unit.GetUnit("rdfs:subClassOf")
logging.debug("from %s to %s - path length %d" % (start_unit.id, end_unit.id, len(path) ) )
path = path + [start_unit]
if start_unit == end_unit:
return [path]
if not Unit.GetUnit(start_unit.id):
return []
paths = []
for node in GetTargets(arc, start_unit, layers=layers):
if node not in path:
newpaths = GetParentList(node, end_unit, path, layers=layers)
for newpath in newpaths:
paths.append(newpath)
return paths
def HasMultipleBaseTypes(typenode, layers='core'):
"""True if this unit represents a type with more than one immediate supertype."""
return len( GetTargets( Unit.GetUnit("rdfs:subClassOf", True), typenode, layers ) ) > 1
EXAMPLESMAP = {}
EXAMPLES = []
ExamplesCount = 0
class Example ():
@staticmethod
def AddExample(terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
"""
Add an Example (via constructor registering it with the terms that it
mentions, i.e. stored in term.examples).
"""
# todo: fix partial examples: if (len(terms) > 0 and len(original_html) > 0 and (len(microdata) > 0 or len(rdfa) > 0 or len(jsonld) > 0)):
typeinfo = "".join( [" %s " % t for t in terms] )
if "FakeEntryNeeded" in typeinfo or terms==[]:
return
if (len(terms) > 0 and len(original_html) > 0 and len(microdata) > 0 and len(rdfa) > 0 and len(jsonld) > 0):
return Example(terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core')
else:
log.info("API AddExample skipped a case due to missing value(s) in example. Target terms: %s ORIG: %s MICRODATA: %s RDFA: %s JSON: %s EGMETA: %s " % ( typeinfo, original_html, microdata, rdfa, jsonld, egmeta ) )
def get(self, name, layers='core') :
"""Exposes original_content, microdata, rdfa and jsonld versions (in the layer(s) specified)."""
if name == 'original_html':
return self.original_html
if name == 'microdata':
return self.microdata
if name == 'rdfa':
return self.rdfa
if name == 'jsonld':
return self.jsonld
def __init__ (self, terms, original_html, microdata, rdfa, jsonld, egmeta, layer='core'):
"""Example constructor, registers itself with the ExampleMap of terms to examples."""
global EXAMPLES, EXAMPLESMAP, ExamplesCount
ExamplesCount += 1
self.orderId = ExamplesCount #Used to maintain consistency of display order
self.terms = terms
self.original_html = original_html
self.microdata = microdata
self.rdfa = rdfa
self.jsonld = jsonld
self.egmeta = egmeta
self.layer = layer
if 'id' in self.egmeta:
self.keyvalue = self.egmeta['id']
else:
self.keyvalue = "%s-gen-%s"% (terms[0],ExamplesCount)
self.egmeta['id'] = self.keyvalue
for term in terms:
if(EXAMPLESMAP.get(term, None) == None):
EXAMPLESMAP[term] = []
if not self in EXAMPLESMAP.get(term):
EXAMPLESMAP.get(term).append(self)
if not self in EXAMPLES:
EXAMPLES.append(self)
def LoadNodeExamples(node, layers='core'):
"""Returns the examples (if any) for some Unit node."""
#log.info("Getting examples for: %s %s" % (node.id,node.examples))
if(node.examples == None):
node.examples = []
if getInTestHarness(): #Get from local storage
node.examples = EXAMPLESMAP.get(node.id)
if(node.examples == None):
node.examples = []
else: #Get from NDB shared storage
ids = ExampleMap.get(node.id)
if not ids:
ids = []
for i in ids:
node.examples.append(ExampleStore.get_by_id(i))
return node.examples
USAGECOUNTS = {}
def StoreUsage(id,count):
USAGECOUNTS[id] = count
def GetUsage(id):
return USAGECOUNTS.get(id,0)
def GetExtMappingsRDFa(node, layers='core'):
"""Self-contained chunk of RDFa HTML markup with mappings for this term."""
if (node.isClass()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
if (c.id.startswith('http')):
markup = markup + "<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id
else:
markup = markup + "<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id
return markup
if (node.isAttribute()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
markup = markup + "<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
return markup
return "<!-- no external mappings noted for this term. -->"
def GetJsonLdContext(layers='core'):
"""Generates a basic JSON-LD context file for schema.org."""
# Caching assumes the context is neutral w.r.t. our hostname.
if DataCache.get('JSONLDCONTEXT'):
#log.debug("DataCache: recycled JSONLDCONTEXT")
return DataCache.get('JSONLDCONTEXT')
else:
global namespaces
jsonldcontext = "{\n \"@context\": {\n"
jsonldcontext += " \"type\": \"@type\",\n"
jsonldcontext += " \"id\": \"@id\",\n"
jsonldcontext += " \"@vocab\": \"http://schema.org/\",\n"
jsonldcontext += namespaces
url = Unit.GetUnit("URL")
date = Unit.GetUnit("Date")
datetime = Unit.GetUnit("DateTime")
# properties = sorted(GetSources(Unit.GetUnit("rdf:type",True), Unit.GetUnit("rdf:Property",True), layers=getAllLayersList()), key=lambda u: u.id)
# for p in properties:
for t in GetAllTerms(EVERYLAYER,includeDataTypes=True):
if t.isClass(EVERYLAYER) or t.isEnumeration(EVERYLAYER) or t.isEnumerationValue(EVERYLAYER) or t.isDataType(EVERYLAYER):
jsonldcontext += " \"" + t.id + "\": {\"@id\": \"schema:" + t.id + "\"},"
elif t.isAttribute(EVERYLAYER):
range = GetTargets(Unit.GetUnit("rangeIncludes"), t, layers=EVERYLAYER)
type = None
if url in range:
type = "@id"
elif date in range:
type = "Date"
elif datetime in range:
type = "DateTime"
typins = ""
if type:
typins = ", \"@type\": \"" + type + "\""
jsonldcontext += " \"" + t.id + "\": { \"@id\": \"schema:" + t.id + "\"" + typins + "},"
jsonldcontext += "}}\n"
jsonldcontext = jsonldcontext.replace("},}}","}\n }\n}")
jsonldcontext = jsonldcontext.replace("},","},\n")
DataCache.put('JSONLDCONTEXT',jsonldcontext)
#log.debug("DataCache: added JSONLDCONTEXT")
return jsonldcontext
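# Illustrative sketch (not part of the original file): the string assembled above takes the form
#
#   { "@context": {
#       "type": "@type",
#       "id": "@id",
#       "@vocab": "http://schema.org/",
#       ... the namespace prefixes declared earlier ...,
#       "Person": {"@id": "schema:Person"},
#       "url": { "@id": "schema:url", "@type": "@id"},
#       ...
#   }}
#
# i.e. every type/enumeration maps to its schema: identifier, and properties whose range
# includes URL, Date or DateTime additionally carry an "@type" coercion.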
#### UTILITIES
def inLayer(layerlist, node):
"""Does a unit get its type mentioned in a layer?"""
if (node is None):
return False
if len(GetTargets(Unit.GetUnit("rdf:type"), node, layers=layerlist) ) > 0:
#log.debug("Found typeOf for node %s in layers: %s" % (node.id, layerlist ))
return True
if len(GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layerlist) ) > 0:
# TODO: should we really test for any mention of a term, not just typing?
return True
return False
def read_file (filename):
"""Read a file from disk, return it as a single string."""
strs = []
file_path = full_path(filename)
import codecs
#log.debug("READING FILE: filename=%s file_path=%s " % (filename, file_path ) )
for line in codecs.open(file_path, 'r', encoding="utf8").readlines():
strs.append(line)
return "".join(strs)
def full_path(filename):
"""convert local file name to full path."""
import os.path
folder = os.path.dirname(os.path.realpath(__file__))
return os.path.join(folder, filename)
def setHomeValues(items,layer='core',defaultToCore=False):
global extensionLoadErrors
for node in items:
if(node == None):
continue
home = GetTargets( Unit.GetUnit("isPartOf"), node, layer )
if(len(home) > 0):
if(node.home != None):
msg = "ERROR: %s trying to overwite home from %s to %s" % (node.id,node.home,home[0].id)
log.info(msg)
extensionLoadErrors += msg + '\n'
else:
h = home[0].id.strip()
if h.startswith("http://"):
h = h[7:]
node.home = re.match( r'([\w\-_]+)[\.:]?', h).group(1)
if(node.home == 'schema'):
node.home = 'core'
elif node.home == None:
if defaultToCore:
node.home = "core"
else:
msg = "ERROR: %s has no home defined" % (node.id)
log.info(msg)
extensionLoadErrors += msg + '\n'
def read_schemas(loadExtensions=False):
"""Read/parse/ingest schemas from data/*.rdfa. Also data/*examples.txt"""
load_start = datetime.datetime.now()
global schemasInitialized
schemasInitialized = True
if (not schemasInitialized or DYNALOAD):
log.debug("[%s] (re)loading core and annotations." % getInstanceId(short=True))
files = glob.glob("data/*.rdfa")
jfiles = glob.glob("data/*.jsonld")
for jf in jfiles:
rdfequiv = jf[:-7]+".rdfa"
if not rdfequiv in files: #Only add .jsonld files if no equivalent .rdfa
files.append(jf)
file_paths = []
for f in files:
file_paths.append(full_path(f))
apirdflib.load_graph('core',file_paths)
log.info("[%s] Loaded core graphs in %s" % (getInstanceId(short=True),(datetime.datetime.now() - load_start)))
load_start = datetime.datetime.now()
files = glob.glob("data/2015-04-vocab_counts.txt")
for file in files:
usage_data = read_file(file)
parser = parsers.UsageFileParser(None)
parser.parse(usage_data)
log.debug("[%s]Loaded usage data in %s" % (getInstanceId(short=True),(datetime.datetime.now() - load_start)))
schemasInitialized = True
def read_extensions(extensions):
global extensionsLoaded
extfiles = []
expfiles = []
load_start = datetime.datetime.now()
if not extensionsLoaded: #2nd load will throw up errors and duplicate terms
log.info("[%s] extensions %s " % (getInstanceId(short=True),extensions))
for i in extensions:
all_layers[i] = "1"
extfiles = glob.glob("data/ext/%s/*.rdfa" % i)
jextfiles = glob.glob("data/ext/%s/*.jsonld" % i)
for jf in jextfiles:
rdfequiv = jf[:-7]+".rdfa"
if not rdfequiv in extfiles: #Only add .jsonld files if no equivalent .rdfa
extfiles.append(jf)
file_paths = []
for f in extfiles:
file_paths.append(full_path(f))
apirdflib.load_graph(i,file_paths)
log.info("[%s]Loaded extension graphs in %s" % (getInstanceId(short=True),(datetime.datetime.now() - load_start)))
extensionsLoaded = True
def load_examples_data(extensions):
load = False
if getInTestHarness():
load = True
elif not memcache.get("ExmplesLoaded"):#Useing NDB Storage and not loaded
load = True
if load:
load_start = datetime.datetime.now()
files = glob.glob("data/*examples.txt")
read_examples(files,'core')
for i in extensions:
expfiles = glob.glob("data/ext/%s/*examples.txt" % i)
read_examples(expfiles,i)
if not getInTestHarness(): #Use NDB Storage
ExampleStore.store(EXAMPLES)
ExampleMap.store(EXAMPLESMAP)
memcache.set("ExmplesLoaded",value=True)
log.info("Loaded %s examples mapped to %s terms in %s" % (len(EXAMPLES),len(EXAMPLESMAP),(datetime.datetime.now() - load_start)))
else:
log.info("Examples already loaded")
def read_examples(files, layer):
first = True
for f in files:
parser = parsers.ParseExampleFile(None,layer=layer)
#log.info("[%s] Reading: %s" % (getInstanceId(short=True),f))
if first:
#log.info("[%s] Loading examples from %s" % (getInstanceId(short=True),layer))
first = False
parser.parse(f)
EXAMPLESTORECACHE = []
class ExampleStore(ndb.Model):
original_html = ndb.TextProperty('h',indexed=False)
microdata = ndb.TextProperty('m',indexed=False)
rdfa = ndb.TextProperty('r',indexed=False)
jsonld = ndb.TextProperty('j',indexed=False)
egmeta = ndb.PickleProperty('e',indexed=False)
keyvalue = ndb.StringProperty('o',indexed=True)
layer = ndb.StringProperty('l',indexed=False)
@staticmethod
def initialise():
EXAMPLESTORECACHE = []
import time
log.info("[%s]ExampleStore initialising Data Store" % (getInstanceId(short=True)))
loops = 0
ret = 0
while loops < 10:
keys = ExampleStore.query().fetch(keys_only=True,use_memcache=False,use_cache=False)
count = len(keys)
if count == 0:
break
log.info("[%s]ExampleStore deleting %s keys" % (getInstanceId(short=True), count))
ndb.delete_multi(keys,use_memcache=False,use_cache=False)
ret += count
loops += 1
time.sleep(0.01)
return {"ExampleStore":ret}
@staticmethod
def add(example):
e = ExampleStore(id=example.keyvalue,
original_html=example.original_html,
microdata=example.microdata,
rdfa=example.rdfa,
jsonld=example.jsonld,
egmeta=example.egmeta,
keyvalue=example.keyvalue,
layer=example.layer)
EXAMPLESTORECACHE.append(e)
@staticmethod
def store(examples):
for e in examples:
ExampleStore.add(e)
if len(EXAMPLESTORECACHE):
ndb.put_multi(EXAMPLESTORECACHE,use_cache=False)
def get(self,name):
if name == 'original_html':
return self.original_html
if name == 'microdata':
return self.microdata
if name == 'rdfa':
return self.rdfa
if name == 'jsonld':
return self.jsonld
return ""
@staticmethod
def getEgmeta(id):
em = ExampleStore.get_by_id(id)
ret = em.egmeta
if ret:
return ret
return {}
EXAMPLESMAPCACHE = []
class ExampleMap(ndb.Model):
examples = ndb.StringProperty('e',repeated=True,indexed=False)
@staticmethod
def initialise():
EXAMPLESMAPCACHE = []
log.info("[%s]ExampleMap initialising Data Store" % (getInstanceId(short=True)))
loops = 0
ret = 0
while loops < 10:
keys = ExampleMap.query().fetch(keys_only=True,use_memcache=False,use_cache=False)
count = len(keys)
if count == 0:
break
log.info("[%s]ExampleMap deleting %s keys" % (getInstanceId(short=True), count))
ndb.delete_multi(keys,use_memcache=False,use_cache=False)
ret += count
loops += 1
time.sleep(0.01)
return {"ExampleMap":ret}
@staticmethod
def store(map):
for term, examples in map.items():
ids = []
for e in examples:
ids.append(e.keyvalue)
EXAMPLESMAPCACHE.append(ExampleMap(id=term,examples=ids))
if len(EXAMPLESMAPCACHE):
ndb.put_multi(EXAMPLESMAPCACHE,use_cache=False)
@staticmethod
def get(term):
em = ExampleMap.get_by_id(term)
if em:
return em.examples
return []
######################################
PageCaches = [PageStore,HeaderStore]
ExampleCaches = [ExampleStore,ExampleMap]
class CacheControl():
@staticmethod
def clean(pagesonly=False):
ret = {}
ret["DataCache"] = DataCache.initialise()
if not NDBPAGESTORE:
ret["PageStore"] = PageStore.initialise()
ret["HeaderStore"] = HeaderStore.initialise()
ndbret = CacheControl.ndbClean()
ret.update(ndbret)
return ret
@staticmethod
def ndbClean():
NdbCaches = PageCaches
NdbCaches += ExampleCaches
ret = {}
if getInTestHarness():
return ret
for c in NdbCaches:
r = c.initialise()
ret.update(r)
return ret
###############################
def StripHtmlTags(source):
if source and len(source) > 0:
return re.sub('<[^<]+?>', '', source)
return ""
def ShortenOnSentence(source,lengthHint=250):
if source and len(source) > lengthHint:
source = source.strip()
sentEnd = re.compile('[.!?]')
sentList = sentEnd.split(source)
com=""
count = 0
while count < len(sentList):
if(count > 0 ):
if len(com) < len(source):
com += source[len(com)]
com += sentList[count]
count += 1
if count == len(sentList):
if len(com) < len(source):
com += source[len(source) - 1]
if len(com) > lengthHint:
if len(com) < len(source):
com += source[len(com)]
break
if len(source) > len(com) + 1:
com += ".."
source = com
return source
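# Hedged usage sketch (illustrative only): the two helpers above are typically chained
# to build a plain-text summary. The helper name and the 140-character hint are choices
# made for this example, not taken from the original file.
def _exampleSummary(html, hint=140):
    return ShortenOnSentence(StripHtmlTags(html), lengthHint=hint)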
log.info("[%s]api loaded" % (getInstanceId(short=True)))
|
DmPo/Schemaorg_CivicOS | apirdflib.py | <reponame>DmPo/Schemaorg_CivicOS
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
import sys
sys.path.append('lib')
import rdflib
from rdflib import Literal
from rdflib.term import URIRef
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.plugins.sparql import prepareQuery
import threading
import api
from apimarkdown import Markdown
import StringIO
rdflib.plugin.register("json-ld", Parser, "rdflib_jsonld.parser", "JsonLDParser")
rdflib.plugin.register("json-ld", Serializer, "rdflib_jsonld.serializer", "JsonLDSerializer")
ATTIC = 'attic'
VOCAB = "http://schema.org"
STORE = rdflib.Dataset()
#Namespace mapping#############
nss = {'core': 'http://schema.org/'}
revNss = {}
NSSLoaded = False
allLayersList = []
context_data = "data/internal-context" #Local file containing the context to be used when loading .jsonld files
RDFLIBLOCK = threading.Lock() #rdflib uses generators which are not threadsafe
from rdflib.namespace import RDFS, RDF, OWL
SCHEMA = rdflib.Namespace('http://schema.org/')
QUERYGRAPH = None
def queryGraph():
global QUERYGRAPH
if not QUERYGRAPH:
try:
RDFLIBLOCK.acquire()
if not QUERYGRAPH:
QUERYGRAPH = rdflib.Graph()
gs = list(STORE.graphs())
for g in gs:
id = str(g.identifier)
if not id.startswith("http://"):#skip some internal graphs
continue
QUERYGRAPH += g
QUERYGRAPH.bind('owl', 'http://www.w3.org/2002/07/owl#')
QUERYGRAPH.bind('rdfa', 'http://www.w3.org/ns/rdfa#')
QUERYGRAPH.bind('dct', 'http://purl.org/dc/terms/')
QUERYGRAPH.bind('schema', 'http://schema.org/')
finally:
RDFLIBLOCK.release()
return QUERYGRAPH
def loadNss():
global NSSLoaded
global nss
global revNss
if not NSSLoaded:
NSSLoaded = True
#log.info("allLayersList: %s"% allLayersList)
for i in allLayersList:
if i != "core":
#log.info("Setting %s to %s" % (i, "http://%s.schema.org/" % i))
nss.update({i:"http://%s.schema.org/" % i})
revNss = {v: k for k, v in nss.items()}
def getNss(val):
global nss
loadNss()
try:
return nss[val]
except KeyError:
return ""
def getRevNss(val):
global revNss
loadNss()
try:
return revNss[val]
except KeyError:
return ""
##############################
def load_graph(context, files):
"""Read/parse/ingest schemas from data/*.rdfa."""
import os.path
import glob
import re
log.debug("Loading %s graph." % context)
for f in files:
if(f[-5:] == ".rdfa"):
format = "rdfa"
elif(f[-7:] == ".jsonld"):
format = "json-ld"
else:
log.info("Unrecognised file format: %s" % f)
return
if(format == "rdfa"):
uri = getNss(context)
g = STORE.graph(URIRef(uri))
g.parse(file=open(full_path(f),"r"),format=format)
STORE.bind(context,uri)
elif(format == "json-ld"):
STORE.parse(file=open(full_path(f),"r"),format=format, context=context_data)
global QUERYGRAPH
QUERYGRAPH = None #Invalidate the merged query graph; it must be rebuilt now that more graphs have been loaded
def rdfGetTriples(id):
"""All triples with node as subject."""
targets = []
fullId = id
# log.info("rdfgetTriples(%s)" % fullId)
if ':' in id: #Includes full path or namespaces
fullId = id
else:
fullId = VOCAB + "/" + id
#log.info("rdfgetTriples(%s)" % source)
first = True
unit = None
homeSetTo = None
typeOfInLayers = []
try:
RDFLIBLOCK.acquire()
q = "SELECT ?g ?p ?o WHERE {GRAPH ?g {<%s> ?p ?o }}" % fullId
res = list(STORE.query(q))
finally:
RDFLIBLOCK.release()
for row in res:
# if source == "http://meta.schema.org/":
# log.info("Triple: %s %s %s %s" % (source, row.p, row.o, row.g))
layer = str(getRevNss(str(row.g)))
if first:
first = False
unit = api.Unit.GetUnitNoLoad(id,True)
s = stripID(fullId)
p = stripID(row.p)
if p == "rdf:type":
typeOfInLayers.append(layer)
elif(p == "isPartOf"):
if(unit.home != None and unit.home != layer):
log.info("WARNING Cannot set %s home to %s - already set to: %s" % (s,layer,unit.home))
unit.home = layer
homeSetTo = layer
elif(p == "category"):
unit.category = row.o
prop = api.Unit.GetUnit(p,True)
if isinstance(row.o,rdflib.Literal):
api.Triple.AddTripleText(unit, prop, row.o, layer)
else:
api.Triple.AddTriple(unit, prop, api.Unit.GetUnit(stripID(row.o),True), layer)
""" Default Unit.home to core if not specificly set with an 'isPartOf' triple """
if(unit and homeSetTo == None):
if('core' in typeOfInLayers or len(typeOfInLayers) == 0):
unit.home = 'core'
else:
log.info("WARNING: %s defined in extensions %s but has no 'isPartOf' triple - cannot default home to core!" % (id,typeOfInLayers))
return unit
def rdfGetSourceTriples(target):
"""All source nodes for a specified arc pointing to a specified node (within any of the specified layers)."""
id = target.id
target.sourced = True
sources = []
fullId = id
if ':' in id: #Includes full path or namespaces
fullId = id
else:
fullId = VOCAB + "/" + id
targ = fullId
if fullId.startswith('http://'):
targ = "<%s>" % fullId
q = "SELECT ?g ?s ?p WHERE {GRAPH ?g {?s ?p %s }}" % targ
try:
RDFLIBLOCK.acquire()
res = list(STORE.query(q))
#log.info("RESCOUNT %s" % len(res))
finally:
RDFLIBLOCK.release()
for row in res:
layer = str(getRevNss(str(row.g)))
unit = api.Unit.GetUnit(stripID(row.s))
p = stripID(row.p)
prop = api.Unit.GetUnit(p,True)
obj = api.Unit.GetUnit(stripID(fullId),True)
api.Triple.AddTriple(unit, prop, obj, layer)
def countFilter(extension="ALL",includeAttic=False):
excludeAttic = "FILTER NOT EXISTS {?term schema:isPartOf <http://attic.schema.org>}."
if includeAttic or extension == ATTIC:
excludeAttic = ""
extensionSel = ""
if extension == "ALL":
extensionSel = ""
elif extension == "core":
extensionSel = "FILTER NOT EXISTS {?term schema:isPartOf ?ex}."
excludeAttic = ""
else:
extensionSel = "FILTER EXISTS {?term schema:isPartOf <http://%s.schema.org>}." % extension
return extensionSel + "\n" + excludeAttic
def countTypes(extension="ALL",includeAttic=False):
filter = countFilter(extension=extension, includeAttic=includeAttic)
query= ('''select (count (?term) as ?cnt) where {
?term a rdfs:Class.
?term rdfs:subClassOf* schema:Thing.
%s
}''') % filter
graph = queryGraph()
count = 0
try:
RDFLIBLOCK.acquire()
res = graph.query(query)
for row in res:
count = row.cnt
finally:
RDFLIBLOCK.release()
return count
def countProperties(extension="ALL",includeAttic=False):
filter = countFilter(extension=extension, includeAttic=includeAttic)
query= ('''select (count (?term) as ?cnt) where {
?term a rdf:Property.
FILTER EXISTS {?term rdfs:label ?l}.
BIND(STR(?term) AS ?strVal).
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/").
%s
}''') % filter
graph = queryGraph()
count = 0
try:
RDFLIBLOCK.acquire()
res = graph.query(query)
for row in res:
count = row.cnt
finally:
RDFLIBLOCK.release()
return count
def countEnums(extension="ALL",includeAttic=False):
filter = countFilter(extension=extension, includeAttic=includeAttic)
query= ('''select (count (?term) as ?cnt) where {
?term a ?type.
?type rdfs:subClassOf* <http://schema.org/Enumeration>.
%s
}''') % filter
graph = queryGraph()
count = 0
try:
RDFLIBLOCK.acquire()
res = graph.query(query)
for row in res:
count = row.cnt
finally:
RDFLIBLOCK.release()
return count
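# Hedged usage sketch (illustrative only): the count* helpers above share countFilter(),
# which splices the extension/attic FILTER clauses into their SELECT queries. The dict
# layout and the "core" argument here are choices made for this example.
def _exampleCoreCounts():
    return {
        "types": countTypes(extension="core"),
        "properties": countProperties(extension="core"),
        "enums": countEnums(extension="core"),
    }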
def serializeSingleTermGrapth(node,format="json-ld",excludeAttic=True,markdown=True):
graph = buildSingleTermGraph(node=node,excludeAttic=excludeAttic,markdown=markdown)
file = StringIO.StringIO()
kwargs = {'sort_keys': True}
file.write(graph.serialize(format=format,**kwargs))
data = file.getvalue()
file.close()
return data
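# Hedged usage sketch (illustrative only): serialise the neighbourhood graph of a single
# term. "Place" is an arbitrary example term, not taken from the original file.
def _exampleTermAsJsonLd():
    return serializeSingleTermGrapth("Place", format="json-ld", excludeAttic=True)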
def buildSingleTermGraph(node,excludeAttic=True,markdown=True):
g = rdflib.Graph()
g.bind('owl', 'http://www.w3.org/2002/07/owl#')
g.bind('rdfa', 'http://www.w3.org/ns/rdfa#')
g.bind('dct', 'http://purl.org/dc/terms/')
g.bind('schema', 'http://schema.org/')
full = "http://schema.org/" + node
#n = URIRef(full)
n = SCHEMA.term(node)
full = str(n)
q = queryGraph()
ret = None
#log.info("NAME %s %s"% (n,full))
atts = None
try:
RDFLIBLOCK.acquire()
atts = list(q.triples((n,SCHEMA.isPartOf,URIRef("http://attic.schema.org"))))
finally:
RDFLIBLOCK.release()
if len(atts):
#log.info("ATTIC TERM %s" % n)
excludeAttic = False
#Outgoing triples
try:
RDFLIBLOCK.acquire()
ret = list(q.triples((n,None,None)))
finally:
RDFLIBLOCK.release()
for (s,p,o) in ret:
#log.info("adding %s %s %s" % (s,p,o))
g.add((s,p,o))
#Incoming triples
ret = list(q.triples((None,None,n)))
for (s,p,o) in ret:
#log.info("adding %s %s %s" % (s,p,o))
g.add((s,p,o))
#super classes
query='''select * where {
?term (^rdfs:subClassOf*) <%s>.
?term rdfs:subClassOf ?super.
?super ?pred ?obj.
}''' % n
try:
RDFLIBLOCK.acquire()
ret = q.query(query)
finally:
RDFLIBLOCK.release()
for row in ret:
#log.info("adding %s %s %s" % (row.term,RDFS.subClassOf,row.super))
g.add((row.term,RDFS.subClassOf,row.super))
g.add((row.super,row.pred,row.obj))
#properties whose domain includes this term or one of its superclasses
query='''select * where{
?term (^rdfs:subClassOf*) <%s>.
?prop <http://schema.org/domainIncludes> ?term.
?prop ?pred ?obj.
}
''' % n
try:
RDFLIBLOCK.acquire()
ret = q.query(query)
finally:
RDFLIBLOCK.release()
for row in ret:
g.add((row.prop,SCHEMA.domainIncludes,row.term))
g.add((row.prop,row.pred,row.obj))
#super properties
query='''select * where {
?term (^rdfs:subPropertyOf*) <%s>.
?term rdfs:subPropertyOf ?super.
?super ?pred ?obj.
}''' % n
try:
RDFLIBLOCK.acquire()
ret = q.query(query)
finally:
RDFLIBLOCK.release()
for row in ret:
#log.info("adding %s %s %s" % (row.term,RDFS.subPropertyOf,row.super))
g.add((row.term,RDFS.subPropertyOf,row.super))
g.add((row.super,row.pred,row.obj))
#Enumeration for an enumeration value
query='''select * where {
<%s> a ?type.
?type ?pred ?obj.
FILTER NOT EXISTS{?type a rdfs:class}.
}''' % n
try:
RDFLIBLOCK.acquire()
ret = q.query(query)
finally:
RDFLIBLOCK.release()
for row in ret:
#log.info("adding %s %s %s" % (row.type,row.pred,row.obj))
g.add((row.type,row.pred,row.obj))
if excludeAttic: #Remove triples referencing terms part of http://attic.schema.org
trips = list(g.triples((None,None,None)))
try:
RDFLIBLOCK.acquire()
for (s,p,o) in trips:
atts = list(q.triples((s,SCHEMA.isPartOf,URIRef("http://attic.schema.org"))))
if isinstance(o, URIRef):
atts.extend(q.triples((o,SCHEMA.isPartOf,URIRef("http://attic.schema.org"))))
for (rs,rp,ro) in atts:
#log.info("Removing %s" % rs)
g.remove((rs,None,None))
g.remove((None,None,rs))
finally:
RDFLIBLOCK.release()
if markdown:
try:
RDFLIBLOCK.acquire()
trips = list(g.triples((None,RDFS.comment,None)))
Markdown.setPre("http://schema.org/")
for (s,p,com) in trips:
mcom = Markdown.parse(com)
g.remove((s,p,com))
g.add((s,p,Literal(mcom)))
finally:
RDFLIBLOCK.release()
Markdown.setPre()
return g
def stripID (str):
l = len(str)
if (l > 17 and (str[:18] == 'http://schema.org/')):
return str[18:]
elif (l > 24 and (str[:25] == 'http://purl.org/dc/terms/')):
return "dc:" + str[25:]
elif (l > 36 and (str[:37] == 'http://www.w3.org/2000/01/rdf-schema#')):
return "rdfs:" + str[37:]
elif (l > 42 and (str[:43] == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')):
return "rdf:" + str[43:]
elif (l > 29 and (str[:30] == 'http://www.w3.org/2002/07/owl#')):
return "owl:" + str[30:]
else:
return str
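# Hedged illustration (added for this note; inputs are examples chosen to exercise the
# prefixes handled above):
def _stripIDExamples():
    assert stripID("http://schema.org/Person") == "Person"
    assert stripID("http://www.w3.org/2000/01/rdf-schema#label") == "rdfs:label"
    assert stripID("dc:title") == "dc:title"  # unrecognised prefixes are returned unchanged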
def full_path(filename):
"""convert local file name to full path."""
import os.path
folder = os.path.dirname(os.path.realpath(__file__))
return os.path.join(folder, filename)
|
DmPo/Schemaorg_CivicOS | scripts/scanissues.py | <reponame>DmPo/Schemaorg_CivicOS<filename>scripts/scanissues.py
#!/usr/bin/env python
# https://developer.github.com/v3/issues/
# https://developer.github.com/v3/repos/
# GET /repos/:owner/:repo/issues
# Beginning of script to scan github for issue/term associations.
# Note: https://developer.github.com/v3/#rate-limiting
# 60 requests per hour per IP address.
import requests # http://www.python-requests.org/en/latest/
import json
import re
import os
myre = re.compile(r"^\s*http://schema.org/(\w+)", re.MULTILINE)
def getPagedAPI(u):
r = requests.get(u)
if (r.ok):
if (len(r.json())==0):
return("000")
for i in r.json():
if "body" in i:
body = i["body"]
hits = myre.findall(body)
for h in hits:
print "http://schema.org/"+h
print i["url"]
if "title" in i:
print i["title"]
print "\n"
print
return None
else:
print "# Issue API error."
return("500")
# Auth - to avoid rate limits, create an OAuth application and put details in GH_AUTH env var.
# See https://developer.github.com/v3/#rate-limiting
gh_auth = os.environ['GH_AUTH']
for i in range(10):
u = "https://api.github.com/repos/schemaorg/schemaorg/issues?milestone=*;page=%i;%s" % ( i, gh_auth )
print "# Fetching page %i " % i
x = getPagedAPI(u) # bogus return codes
if x != None:
break
|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/test_formfiller.py | <filename>lib/html5lib/tests/test_formfiller.py
import sys
import unittest
from html5lib.filters.formfiller import SimpleFilter
class FieldStorage(dict):
def getlist(self, name):
l = self[name]
if isinstance(l, list):
return l
elif isinstance(l, tuple) or hasattr(l, '__iter__'):
return list(l)
return [l]
class TestCase(unittest.TestCase):
def runTest(self, input, formdata, expected):
try:
output = list(SimpleFilter(input, formdata))
except NotImplementedError, nie:
# Amnesty for those that confess...
print >>sys.stderr, "Not implemented:", str(nie)
else:
errorMsg = "\n".join(["\n\nInput:", str(input),
"\nForm data:", str(formdata),
"\nExpected:", str(expected),
"\nReceived:", str(output)])
self.assertEquals(output, expected, errorMsg)
def testSingleTextInputWithValue(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"quux")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
def testSingleTextInputWithoutValue(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"text"), (u"name", u"foo"), (u"value", u"bar")]}])
def testSingleCheckbox(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
def testSingleCheckboxShouldBeUnchecked(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
def testSingleCheckboxCheckedByDefault(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"bar"), (u"checked", u"")]}])
def testSingleCheckboxCheckedByDefaultShouldBeUnchecked(self):
self.runTest(
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux"), (u"checked", u"")]}],
FieldStorage({"foo": "bar"}),
[{"type": u"EmptyTag", "name": u"input",
"data": [(u"type", u"checkbox"), (u"name", u"foo"), (u"value", u"quux")]}])
def testSingleTextareaWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"textarea", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"textarea", "data": []}])
def testSingleTextareaWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"EndTag", "name": u"textarea", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"textarea", "data": [(u"name", u"foo")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"textarea", "data": []}])
def testSingleSelectWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectWithoutValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"baz")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithoutValue(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "bar"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"selected", u"")]},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectTwoOptionsWithoutValueShouldBeUnselected(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"baz"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": "quux"}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"bar"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": []},
{"type": u"Characters", "data": u"baz"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testSingleSelectMultiple(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": ["bar", "quux"]}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo"), (u"multiple", u"")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def testTwoSelect(self):
self.runTest(
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []},
{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}],
FieldStorage({"foo": ["bar", "quux"]}),
[{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []},
{"type": u"StartTag", "name": u"select", "data": [(u"name", u"foo")]},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"bar")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"StartTag", "name": u"option", "data": [(u"value", u"quux"), (u"selected", u"")]},
{"type": u"Characters", "data": u"quux"},
{"type": u"EndTag", "name": u"option", "data": []},
{"type": u"EndTag", "name": u"select", "data": []}])
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/property.py | <filename>lib/rdflib/plugins/parsers/pyRdfa/property.py
# -*- coding: utf-8 -*-
"""
Implementation of the C{@property} value handling.
RDFa 1.0 and RDFa 1.1 are fairly different. RDFa 1.0 generates only literals, see
U{RDFa Task Force's wiki page<http://www.w3.org/2006/07/SWD/wiki/RDFa/LiteralObject>} for the details.
On the other hand, RDFa 1.1, beyond literals, can also generate URI references. Hence the duplicate method in the L{ProcessProperty} class, one for RDFa 1.0 and the other for RDFa 1.1.
@summary: RDFa Literal generation
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: property.py,v 1.11 2012/06/12 11:47:11 ivan Exp $
$Date: 2012/06/12 11:47:11 $
"""
import re, sys
import rdflib
from rdflib import BNode
from rdflib import Literal, URIRef, Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib.term import XSDToPython
else :
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Literal import XSDToPython
from . import IncorrectBlankNodeUsage, IncorrectLiteral, err_no_blank_node, ns_xsd
from .utils import has_one_of_attributes, return_XML
from .host.html5 import handled_time_types
XMLLiteral = ns_rdf["XMLLiteral"]
HTMLLiteral = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#HTML")
class ProcessProperty :
"""Generate the value for C{@property} taking into account datatype, etc.
Note: this class is created only if the C{@property} is indeed present, no need to check.
@ivar node: DOM element node
@ivar graph: the (RDF) graph to add the properties to
@ivar subject: the RDFLib URIRef serving as a subject for the generated triples
@ivar state: the current state to be used for the CURIE-s
@type state: L{state.ExecutionContext}
@ivar typed_resource: Typically the bnode generated by a @typeof
"""
def __init__(self, node, graph, subject, state, typed_resource = None) :
"""
@param node: DOM element node
@param graph: the (RDF) graph to add the properties to
@param subject: the RDFLib URIRef serving as a subject for the generated triples
@param state: the current state to be used for the CURIE-s
@type state: L{state.ExecutionContext}
@param typed_resource: Typically the bnode generated by a @typeof; in RDFa 1.1, that becomes the object for C{@property}
"""
self.node = node
self.graph = graph
self.subject = subject
self.state = state
self.typed_resource = typed_resource
def generate(self) :
"""
Common entry point for the RDFa 1.0 and RDFa 1.1 versions; bifurcates based on the RDFa version, as retrieved from the state object.
"""
if self.state.rdfa_version >= "1.1" :
self.generate_1_1()
else :
self.generate_1_0()
def generate_1_1(self) :
"""Generate the property object, 1.1 version"""
#########################################################################
# See if the target is _not_ a literal
irirefs = ("resource", "href", "src")
noiri = ("content", "datatype", "rel", "rev")
notypediri = ("content", "datatype", "rel", "rev", "about", "about_pruned")
if has_one_of_attributes(self.node, irirefs) and not has_one_of_attributes(self.node, noiri) :
# @href/@resource/@src takes the lead here...
object = self.state.getResource(irirefs)
elif self.node.hasAttribute("typeof") and not has_one_of_attributes(self.node, notypediri) and self.typed_resource != None :
# a @typeof creates a special branch in case the typed resource was set during parsing
object = self.typed_resource
else :
# We have to generate a literal
# Get, if exists, the value of @datatype
datatype = ''
dtset = False
if self.node.hasAttribute("datatype") :
dtset = True
dt = self.node.getAttribute("datatype")
if dt != "" :
datatype = self.state.getURI("datatype")
# Suppress lang: some elements may explicitly want to suppress the effect of language.
# There were discussions, for example, that the <time> element should do so. Although
# that decision was later reversed, the functionality is kept in the code in case another
# element might need it...
if self.state.lang != None and self.state.supress_lang == False :
lang = self.state.lang
else :
lang = ''
# The simple case: separate @content attribute
if self.node.hasAttribute("content") :
val = self.node.getAttribute("content")
# Handling the automatic uri conversion case
if dtset == False :
object = Literal(val, lang=lang)
else :
object = self._create_Literal(val, datatype=datatype, lang=lang)
# The value of datatype has been set, and the keyword parameters take care of the rest
else :
# see if there *is* a datatype (even if it is empty!)
if dtset :
if datatype == XMLLiteral :
litval = self._get_XML_literal(self.node)
object = Literal(litval,datatype=XMLLiteral)
elif datatype == HTMLLiteral :
# I am not sure why this hack is necessary, but otherwise an encoding error occurs
# In Python3 all this should become moot, due to the unicode everywhere approach...
if sys.version_info[0] >= 3 :
object = Literal(self._get_HTML_literal(self.node), datatype=HTMLLiteral)
else :
litval = self._get_HTML_literal(self.node)
o = Literal(litval, datatype=XMLLiteral)
object = Literal(o, datatype=HTMLLiteral)
else :
object = self._create_Literal(self._get_literal(self.node), datatype=datatype, lang=lang)
else :
object = self._create_Literal(self._get_literal(self.node), lang=lang)
if object != None :
for prop in self.state.getURI("property") :
if not isinstance(prop, BNode) :
if self.node.hasAttribute("inlist") :
self.state.add_to_list_mapping(prop, object)
else :
self.graph.add( (self.subject, prop, object) )
else :
self.state.options.add_warning(err_no_blank_node % "property", warning_type=IncorrectBlankNodeUsage, node=self.node.nodeName)
# return
def generate_1_0(self) :
"""Generate the property object, 1.0 version"""
#########################################################################
# We have to generate a literal indeed.
# Get, if exists, the value of @datatype
datatype = ''
dtset = False
if self.node.hasAttribute("datatype") :
dtset = True
dt = self.node.getAttribute("datatype")
if dt != "" :
datatype = self.state.getURI("datatype")
if self.state.lang != None :
lang = self.state.lang
else :
lang = ''
# The simple case: separate @content attribute
if self.node.hasAttribute("content") :
val = self.node.getAttribute("content")
# Handling the automatic uri conversion case
if dtset == False :
object = Literal(val, lang=lang)
else :
object = self._create_Literal(val, datatype=datatype, lang=lang)
# The value of datatype has been set, and the keyword parameters take care of the rest
else :
# see if there *is* a datatype (even if it is empty!)
if dtset :
# yep. The Literal content is the pure text part of the current element:
# We have to check whether the specified datatype is, in fact, an
# explicit XML Literal
if datatype == XMLLiteral :
litval = self._get_XML_literal(self.node)
object = Literal(litval,datatype=XMLLiteral)
elif datatype == HTMLLiteral :
# I am not sure why this hack is necessary, but otherwise an encoding error occurs
# In Python3 all this should become moot, due to the unicode everywhere approach...
if sys.version_info[0] >= 3 :
object = Literal(self._get_HTML_literal(self.node), datatype=HTMLLiteral)
else :
litval = self._get_HTML_literal(self.node)
o = Literal(litval, datatype=XMLLiteral)
object = Literal(o, datatype=HTMLLiteral)
else :
object = self._create_Literal(self._get_literal(self.node), datatype=datatype, lang=lang)
else :
# no controlling @datatype. We have to see if there is markup in the contained
# element
if True in [ n.nodeType == self.node.ELEMENT_NODE for n in self.node.childNodes ] :
# yep, and XML Literal should be generated
object = self._create_Literal(self._get_XML_literal(self.node), datatype=XMLLiteral)
else :
# At this point, there might be entities in the string that are returned as real characters by the dom
# implementation. That should be turned back
object = self._create_Literal(self._get_literal(self.node), lang=lang)
for prop in self.state.getURI("property") :
if not isinstance(prop,BNode) :
self.graph.add( (self.subject,prop,object) )
else :
self.state.options.add_warning(err_no_blank_node % "property", warning_type=IncorrectBlankNodeUsage, node=self.node.nodeName)
# return
######################################################################################################################################
def _putBackEntities(self, str) :
"""Put 'back' entities for the '&','<', and '>' characters, to produce a proper XML string.
Used by the XML Literal extraction.
@param str: string to be converted
@return: string with entities
@rtype: string
"""
return str.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')
def _get_literal(self, Pnode):
"""
Get (recursively) the full text from a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + self._get_literal(node)
# The decision of the group in February 2008 is not to normalize the result by default.
# This is reflected in the default value of the option
if self.state.options.space_preserve :
return rc
else :
return re.sub(r'(\r| |\n|\t)+'," ",rc).strip()
# end getLiteral
def _get_XML_literal(self, Pnode) :
"""
Get (recursively) the XML Literal content of a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + self._putBackEntities(node.data)
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + return_XML(self.state, node, base = False)
return rc
# end getXMLLiteral
def _get_HTML_literal(self, Pnode) :
"""
Get (recursively) the XML Literal content of a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + self._putBackEntities(node.data)
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + return_XML(self.state, node, base = False, xmlns = False )
return rc
# end getXMLLiteral
def _create_Literal(self, val, datatype = '', lang = '') :
"""
Create a literal, taking into account the datatype and language.
@return: Literal
"""
if datatype == None or datatype == '' :
return Literal(val, lang=lang)
#elif datatype == ns_xsd["string"] :
# return Literal(val)
else :
# This is a bit convoluted... the default setup of rdflib does not gracefully react if the
# datatype cannot properly be converted to Python. I have to copy and reuse some of the
# rdflib code to get this working...
# To make things worse: rdflib 3.1.0 does not handle the various xsd date types properly, ie,
# the conversion function below will generate errors. Ie, the check should be skipped for those
if ("%s" % datatype) in handled_time_types and rdflib.__version__ < "3.2.0" :
convFunc = False
else :
convFunc = XSDToPython.get(datatype, None)
if convFunc :
try :
pv = convFunc(val)
# If we got there the literal value and its datatype match
except :
self.state.options.add_warning("Incompatible value (%s) and datatype (%s) in Literal definition." % (val, datatype), warning_type=IncorrectLiteral, node=self.node.nodeName)
return Literal(val, datatype=datatype)
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/sparql/update.py | <filename>lib/rdflib/plugins/sparql/update.py
"""
Code for carrying out Update Operations
"""
from rdflib import Graph, Variable
from rdflib.plugins.sparql.sparql import QueryContext
from rdflib.plugins.sparql.evalutils import _fillTemplate, _join
from rdflib.plugins.sparql.evaluate import evalBGP, evalPart
def _graphOrDefault(ctx, g):
if g == 'DEFAULT':
return ctx.graph
else:
return ctx.dataset.get_context(g)
def _graphAll(ctx, g):
"""
return a list of graphs
"""
if g == 'DEFAULT':
return [ctx.graph]
elif g == 'NAMED':
return [c for c in ctx.dataset.contexts()
if c.identifier != ctx.graph.identifier]
elif g == 'ALL':
return list(ctx.dataset.contexts())
else:
return [ctx.dataset.get_context(g)]
def evalLoad(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#load
"""
if u.graphiri:
ctx.load(u.iri, default=False, publicID=u.graphiri)
else:
ctx.load(u.iri, default=True)
def evalCreate(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#create
"""
g = ctx.dataset.get_context(u.graphiri)
if len(g) > 0:
raise Exception("Graph %s already exists." % g.identifier)
raise Exception("Create not implemented!")
def evalClear(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#clear
"""
for g in _graphAll(ctx, u.graphiri):
g.remove((None, None, None))
def evalDrop(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#drop
"""
if ctx.dataset.store.graph_aware:
for g in _graphAll(ctx, u.graphiri):
ctx.dataset.store.remove_graph(g)
else:
evalClear(ctx, u)
def evalInsertData(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#insertData
"""
# add triples
g = ctx.graph
g += u.triples
# add quads
# u.quads is a dict of graphURI=>[triples]
for g in u.quads:
cg = ctx.dataset.get_context(g)
cg += u.quads[g]
def evalDeleteData(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#deleteData
"""
# remove triples
g = ctx.graph
g -= u.triples
# remove quads
# u.quads is a dict of graphURI=>[triples]
for g in u.quads:
cg = ctx.dataset.get_context(g)
cg -= u.quads[g]
def evalDeleteWhere(ctx, u):
"""
http://www.w3.org/TR/sparql11-update/#deleteWhere
"""
res = evalBGP(ctx, u.triples)
for g in u.quads:
cg = ctx.dataset.get_context(g)
c = ctx.pushGraph(cg)
res = _join(res, list(evalBGP(c, u.quads[g])))
for c in res:
g = ctx.graph
g -= _fillTemplate(u.triples, c)
for g in u.quads:
cg = ctx.dataset.get_context(c.get(g))
cg -= _fillTemplate(u.quads[g], c)
def evalModify(ctx, u):
originalctx = ctx
# Using replaces the dataset for evaluating the where-clause
if u.using:
otherDefault = False
for d in u.using:
if d.default:
if not otherDefault:
# replace current default graph
dg = Graph()
ctx = ctx.pushGraph(dg)
otherDefault = True
ctx.load(d.default, default=True)
elif d.named:
g = d.named
ctx.load(g, default=False)
# "The WITH clause provides a convenience for when an operation
# primarily refers to a single graph. If a graph name is specified
# in a WITH clause, then - for the purposes of evaluating the
# WHERE clause - this will define an RDF Dataset containing a
# default graph with the specified name, but only in the absence
# of USING or USING NAMED clauses. In the presence of one or more
# graphs referred to in USING clauses and/or USING NAMED clauses,
# the WITH clause will be ignored while evaluating the WHERE
# clause."
if not u.using and u.withClause:
g = ctx.dataset.get_context(u.withClause)
ctx = ctx.pushGraph(g)
res = evalPart(ctx, u.where)
if u.using:
if otherDefault:
ctx = originalctx # restore original default graph
if u.withClause:
g = ctx.dataset.get_context(u.withClause)
ctx = ctx.pushGraph(g)
for c in res:
dg = ctx.graph
if u.delete:
dg -= _fillTemplate(u.delete.triples, c)
for g, q in u.delete.quads.iteritems():
cg = ctx.dataset.get_context(c.get(g))
cg -= _fillTemplate(q, c)
if u.insert:
dg += _fillTemplate(u.insert.triples, c)
for g, q in u.insert.quads.iteritems():
cg = ctx.dataset.get_context(c.get(g))
cg += _fillTemplate(q, c)
def evalAdd(ctx, u):
"""
add all triples from src to dst
http://www.w3.org/TR/sparql11-update/#add
"""
src, dst = u.graph
srcg = _graphOrDefault(ctx, src)
dstg = _graphOrDefault(ctx, dst)
if srcg.identifier == dstg.identifier:
return
dstg += srcg
def evalMove(ctx, u):
"""
remove all triples from dst
add all triples from src to dst
remove all triples from src
http://www.w3.org/TR/sparql11-update/#move
"""
src, dst = u.graph
srcg = _graphOrDefault(ctx, src)
dstg = _graphOrDefault(ctx, dst)
if srcg.identifier == dstg.identifier:
return
dstg.remove((None, None, None))
dstg += srcg
if ctx.dataset.store.graph_aware:
ctx.dataset.store.remove_graph(srcg)
else:
srcg.remove((None, None, None))
def evalCopy(ctx, u):
"""
remove all triples from dst
add all triples from src to dst
http://www.w3.org/TR/sparql11-update/#copy
"""
src, dst = u.graph
srcg = _graphOrDefault(ctx, src)
dstg = _graphOrDefault(ctx, dst)
if srcg.identifier == dstg.identifier:
return
dstg.remove((None, None, None))
dstg += srcg
def evalUpdate(graph, update, initBindings=None):
"""
http://www.w3.org/TR/sparql11-update/#updateLanguage
'A request is a sequence of operations [...] Implementations MUST
ensure that operations of a single request are executed in a
fashion that guarantees the same effects as executing them in
lexical order.
Operations all result either in success or failure.
If multiple operations are present in a single request, then a
result of failure from any operation MUST abort the sequence of
operations, causing the subsequent operations to be ignored.'
This will return None on success and raise Exceptions on error
"""
for u in update:
ctx = QueryContext(graph)
ctx.prologue = u.prologue
if initBindings:
for k, v in initBindings.iteritems():
if not isinstance(k, Variable):
k = Variable(k)
ctx[k] = v
# ctx.push() # necessary?
try:
if u.name == 'Load':
evalLoad(ctx, u)
elif u.name == 'Clear':
evalClear(ctx, u)
elif u.name == 'Drop':
evalDrop(ctx, u)
elif u.name == 'Create':
evalCreate(ctx, u)
elif u.name == 'Add':
evalAdd(ctx, u)
elif u.name == 'Move':
evalMove(ctx, u)
elif u.name == 'Copy':
evalCopy(ctx, u)
elif u.name == 'InsertData':
evalInsertData(ctx, u)
elif u.name == 'DeleteData':
evalDeleteData(ctx, u)
elif u.name == 'DeleteWhere':
evalDeleteWhere(ctx, u)
elif u.name == 'Modify':
evalModify(ctx, u)
else:
raise Exception('Unknown update operation: %s' % (u,))
except:
if not u.silent:
raise
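# Hedged usage sketch (assumption: the parseUpdate/translateUpdate helpers from
# rdflib.plugins.sparql.parser and .algebra are available, as in the rdflib 4.x layout
# this module belongs to; the exact entry point may differ between rdflib versions).
# It shows how a SPARQL Update string could be fed to evalUpdate() above.
def _exampleRunUpdate(graph, updateString):
    from rdflib.plugins.sparql.parser import parseUpdate
    from rdflib.plugins.sparql.algebra import translateUpdate
    evalUpdate(graph, translateUpdate(parseUpdate(updateString)))
# e.g. _exampleRunUpdate(g, "INSERT DATA { <http://example.org/s> <http://example.org/p> 'o' }")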
|
DmPo/Schemaorg_CivicOS | lib/html5lib/filters/inject_meta_charset.py | <reponame>DmPo/Schemaorg_CivicOS<filename>lib/html5lib/filters/inject_meta_charset.py
import _base
class Filter(_base.Filter):
def __init__(self, source, encoding):
_base.Filter.__init__(self, source)
self.encoding = encoding
def __iter__(self):
state = "pre_head"
meta_found = (self.encoding is None)
pending = []
for token in _base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag":
if token["name"].lower() == u"head":
state = "in_head"
elif type == "EmptyTag":
if token["name"].lower() == u"meta":
# replace charset with actual encoding
has_http_equiv_content_type = False
for (namespace,name),value in token["data"].iteritems():
if namespace != None:
continue
elif name.lower() == u'charset':
token["data"][(namespace,name)] = self.encoding
meta_found = True
break
elif name == u'http-equiv' and value.lower() == u'content-type':
has_http_equiv_content_type = True
else:
if has_http_equiv_content_type and (None, u"content") in token["data"]:
token["data"][(None, u"content")] = u'text/html; charset=%s' % self.encoding
meta_found = True
elif token["name"].lower() == u"head" and not meta_found:
# insert meta into empty head
yield {"type": "StartTag", "name": u"head",
"data": token["data"]}
yield {"type": "EmptyTag", "name": u"meta",
"data": {(None, u"charset"): self.encoding}}
yield {"type": "EndTag", "name": u"head"}
meta_found = True
continue
elif type == "EndTag":
if token["name"].lower() == u"head" and pending:
# insert meta into head (if necessary) and flush pending queue
yield pending.pop(0)
if not meta_found:
yield {"type": "EmptyTag", "name": u"meta",
"data": {(None, u"charset"): self.encoding}}
while pending:
yield pending.pop(0)
meta_found = True
state = "post_head"
if state == "in_head":
pending.append(token)
else:
yield token
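# Hedged usage sketch (assumption: the bundled html5lib exposes a "dom" tree walker via
# html5lib.treewalkers.getTreeWalker, as the 0.x releases under lib/ do). The filter wraps
# a tree-walker token stream so that a downstream serializer emits the forced charset.
def _example_force_charset(dom_tree, encoding="utf-8"):
    from html5lib.treewalkers import getTreeWalker
    walker = getTreeWalker("dom")
    return Filter(walker(dom_tree), encoding)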
|
DmPo/Schemaorg_CivicOS | lib/rdflib_jsonld/__init__.py | """
"""
__version__ = "0.3-dev"
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/nquads.py | """
This is an rdflib plugin for parsing NQuad files into Conjunctive
graphs that can be used and queried. The store that backs the graph
*must* be able to handle contexts.
>>> from rdflib import ConjunctiveGraph, URIRef, Namespace
>>> g = ConjunctiveGraph()
>>> data = open("test/nquads.rdflib/example.nquads", "rb")
>>> g.parse(data, format="nquads") # doctest:+ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> assert len(g.store) == 449
>>> # There should be 16 separate contexts
>>> assert len([x for x in g.store.contexts()]) == 16
>>> # is the name of entity E10009 "Arco Publications"?
>>> # (in graph http://bibliographica.org/entity/E10009)
>>> # Looking for:
>>> # <http://bibliographica.org/entity/E10009>
>>> # <http://xmlns.com/foaf/0.1/name>
>>> # "Arco Publications"
>>> # <http://bibliographica.org/entity/E10009>
>>> s = URIRef("http://bibliographica.org/entity/E10009")
>>> FOAF = Namespace("http://xmlns.com/foaf/0.1/")
>>> assert(g.value(s, FOAF.name).eq("Arco Publications"))
"""
from codecs import getreader
from rdflib.py3compat import b
from rdflib import ConjunctiveGraph
# Build up from the NTriples parser:
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.plugins.parsers.ntriples import r_tail
from rdflib.plugins.parsers.ntriples import r_wspace
from rdflib.plugins.parsers.ntriples import r_wspaces
__all__ = ['NQuadsParser']
class NQuadsParser(NTriplesParser):
def parse(self, inputsource, sink, **kwargs):
"""Parse f as an N-Triples file."""
assert sink.store.context_aware, ("NQuadsParser must be given"
" a context aware store.")
self.sink = ConjunctiveGraph(store=sink.store)
source = inputsource.getByteStream()
if not hasattr(source, 'read'):
raise ParseError("Item to parse must be a file-like object.")
source = getreader('utf-8')(source)
self.file = source
self.buffer = ''
while True:
self.line = __line = self.readline()
if self.line is None:
break
try:
self.parseline()
except ParseError, msg:
raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
return self.sink
def parseline(self):
self.eat(r_wspace)
if (not self.line) or self.line.startswith(('#')):
return # The line is empty or a comment
subject = self.subject()
self.eat(r_wspace)
predicate = self.predicate()
self.eat(r_wspace)
obj = self.object()
self.eat(r_wspace)
context = self.uriref() or self.nodeid()
self.eat(r_tail)
if self.line:
raise ParseError("Trailing garbage")
# Must have a context aware store - add on a normal Graph
# discards anything where the ctx != graph.identifier
self.sink.get_context(context).add((subject, predicate, obj))
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/host/atom.py | <reponame>DmPo/Schemaorg_CivicOS
# -*- coding: utf-8 -*-
"""
Simple transformer for Atom: the C{@typeof=""} is added to the C{<entry>} element (unless something is already there).
@summary: Add a default C{@typeof=""} to the Atom C{<entry>} element
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: <NAME>, <EMAIL>
"""
"""
$Id: atom.py,v 1.3 2013-01-07 13:03:16 ivan Exp $
$Date: 2013-01-07 13:03:16 $
"""
def atom_add_entry_type(node, state) :
"""
@param node: the current node that could be modified
@param state: current state
@type state: L{Execution context<pyRdfa.state.ExecutionContext>}
"""
def res_set(node) :
return True in [ node.hasAttribute(a) for a in ["resource", "about", "href", "src"] ]
if node.tagName == "entry" and not res_set(node) and node.hasAttribute("typeof") == False :
node.setAttribute("typeof","")
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/transform/lite.py | <filename>lib/rdflib/plugins/parsers/pyRdfa/transform/lite.py
# -*- coding: utf-8 -*-
"""
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: <NAME>, <EMAIL>
@version: $Id: lite.py,v 1.11 2013-09-26 16:37:54 ivan Exp $
$Date: 2013-09-26 16:37:54 $
"""
from ..host import HostLanguage
non_lite_attributes = ["about","inlist","datatype","rev","rel","content"]
non_lite_attributes_html = ["about","inlist","datatype","rev"]
def lite_prune(top, options, state) :
"""
This is a misnomer. The current version does not remove anything from the tree; it just generates warnings about the
usage of non-lite attributes. A more aggressive version would remove those attributes, but that would,
in fact, define an RDFa Lite conformance level in the parser, which is against the WG decisions. So this should
not be done; the corresponding commands are commented out in the code below...
@param top: a DOM node for the top level element
@param options: invocation options
@type options: L{Options<pyRdfa.options>}
@param state: top level execution state
@type state: L{State<pyRdfa.state>}
"""
def generate_warning(node, attr) :
if attr == "rel" :
msg = "Attribute @rel should not be used in RDFa Lite (consider using @property)"
elif attr == "about" :
msg = "Attribute @about should not be used in RDFa Lite (consider using a <link> element with @href or @resource)"
else :
msg = "Attribute @%s should not be used in RDFa Lite" % attr
options.add_warning(msg, node=node)
def remove_attrs(node) :
from ..termorcurie import termname
# first the @content; this has a special treatment
# there are some extras to check for HTML dialects
if options.host_language in [ HostLanguage.html5, HostLanguage.xhtml5, HostLanguage.xhtml ] :
if node.tagName != "meta" and node.hasAttribute("content") :
generate_warning(node, "content")
# node.removeAttribute("content")
if node.tagName != "link" and node.hasAttribute("rel") :
# Unfortunately, this has to be checked separately and run-time for <link> where @rel is allowed for non-RDFa purposes...
# Additional complication: @rel is allowed in an <a> element, for example, if used as a pure term and not as a URI or CURIE
if node.tagName == "a" :
vals = node.getAttribute("rel").strip().split()
if len(vals) != 0 :
final_vals = [ v for v in vals if not termname.match(v) ]
if len(final_vals) != 0 :
generate_warning(node, "rel")
else :
generate_warning(node, "rel")
for attr in non_lite_attributes_html :
if node.hasAttribute(attr) :
generate_warning(node, attr)
# node.removeAttribute(attr)
else :
for attr in non_lite_attributes :
if node.hasAttribute(attr) :
generate_warning(node, attr)
# node.removeAttribute(attr)
remove_attrs(top)
for n in top.childNodes :
if n.nodeType == top.ELEMENT_NODE :
lite_prune(n, options, state)
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/rdfs/__init__.py | <filename>lib/rdflib/plugins/parsers/pyRdfa/rdfs/__init__.py
# -*- coding: utf-8 -*-
"""
Separate module to handle vocabulary expansions. The L{cache} module takes care of caching vocabulary graphs; the L{process}
module takes care of the expansion itself.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.4 2012/08/20 13:15:28 ivan Exp $ $Date: 2012/08/20 13:15:28 $
"""
import sys
import os
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
from .. import RDFaError, pyRdfaError
from .. import ns_rdfa, ns_xsd, ns_distill
VocabCachingInfo = ns_distill["VocabCachingInfo"]
# Error message texts
err_outdated_cache = "Vocab document <%s> could not be dereferenced; using possibly outdated cache"
err_unreachable_vocab = "Vocab document <%s> could not be dereferenced"
err_unparsable_Turtle_vocab = "Could not parse vocab in Turtle at <%s> (%s)"
err_unparsable_xml_vocab = "Could not parse vocab in RDF/XML at <%s> (%s)"
err_unparsable_ntriples_vocab = "Could not parse vocab in N-Triple at <%s> (%s)"
err_unparsable_rdfa_vocab = "Could not parse vocab in RDFa at <%s> (%s)"
err_unrecognised_vocab_type = "Unrecognized media type for the vocab file <%s>: '%s'"
|
DmPo/Schemaorg_CivicOS | scripts/acks.py | #!/usr/bin/env python
import sys
import re
# Takes a list of property URLs and generates property-acks.rdfa markup
string = ""
with open(sys.argv[1], 'r') as file:
for line in file.readlines():
if re.match("^\s",line):
pass
else:
line = line.rstrip("\n")
string += """<div typeof="rdf:Property" resource="%s"><link property="dc:source" href="http://www.w3.org/wiki/WebSchemas/SchemaDotOrgSources#source_GoodRelationsTerms"/></div>\n""" % line
# string = string + '<a href="%s">%s</a>, ' % (line, line)
print "<div>\n%s</div>\n\n" % string
|
DmPo/Schemaorg_CivicOS | lib/rdflib/tools/rdfs2dot.py | <reponame>DmPo/Schemaorg_CivicOS
"""
A commandline tool for drawing RDFS Class diagrams in Graphviz DOT
format
You can draw the graph of an RDFS file directly:
.. code-block: bash
rdf2dot my_rdfs_file.rdf | dot -Tpng | display
"""
import rdflib.extras.cmdlineutils
import sys
import itertools
import collections
from rdflib import XSD, RDF, RDFS
XSDTERMS = [XSD[x] for x in (
"anyURI", "base64Binary", "boolean", "byte", "date", "dateTime", "decimal",
"double", "duration", "float", "gDay", "gMonth", "gMonthDay", "gYear",
"gYearMonth", "hexBinary", "ID", "IDREF", "IDREFS", "int", "integer",
"language", "long", "Name", "NCName", "negativeInteger", "NMTOKEN",
"NMTOKENS", "nonNegativeInteger", "nonPositiveInteger", "normalizedString",
"positiveInteger", "QName", "short", "string", "time", "token",
"unsignedByte", "unsignedInt", "unsignedLong", "unsignedShort")]
EDGECOLOR = "blue"
NODECOLOR = "black"
ISACOLOR = "black"
def rdfs2dot(g, stream, opts={}):
"""
Convert the RDFS schema in a graph
writes the dot output to the stream
"""
fields = collections.defaultdict(set)
nodes = {}
def node(x):
if x not in nodes:
nodes[x] = "node%d" % len(nodes)
return nodes[x]
def label(x, g):
l = g.value(x, RDFS.label)
if l is None:
try:
l = g.namespace_manager.compute_qname(x)[2]
except:
pass # bnodes and some weird URIs cannot be split
return l
stream.write(u"digraph { \n node [ fontname=\"DejaVu Sans\" ] ; \n")
for x in g.subjects(RDF.type, RDFS.Class):
n = node(x)
for x, y in g.subject_objects(RDFS.subClassOf):
x = node(x)
y = node(y)
stream.write(u"\t%s -> %s [ color=%s ] ;\n" % (y, x, ISACOLOR))
for x in g.subjects(RDF.type, RDF.Property):
for a, b in itertools.product(
g.objects(x, RDFS.domain), g.objects(x, RDFS.range)):
if b in XSDTERMS or b == RDFS.Literal:
l = label(b, g)
if b == RDFS.Literal:
l = "literal"
fields[node(a)].add((label(x, g), l))
else:
# if a in nodes and b in nodes:
stream.write(
"\t%s -> %s [ color=%s, label=\"%s\" ];\n" % (
node(a), node(b), EDGECOLOR, label(x, g)))
for u, n in nodes.items():
stream.write(u"# %s %s\n" % (u, n))
f = [u"<tr><td align='left'>%s</td><td>%s</td></tr>" %
x for x in sorted(fields[n])]
opstr = u"%s [ shape=none, color=%s label=< <table color='#666666'" + \
u" cellborder=\"0\" cellspacing='0' border=\"1\"><tr>" + \
u"<td colspan=\"2\" bgcolor='grey'><B>%s</B></td>" + \
u"</tr>%s</table> > ] \n"
stream.write(opstr % (n, NODECOLOR, label(u, g), u"".join(f)))
stream.write("}\n")
def _help():
sys.stderr.write("""
rdfs2dot.py [-f <format>] files...
Reads the RDF files given as arguments and writes a graph of the RDFS
schema in DOT language to stdout.
-f specifies the parser to use; if not given, it is guessed from the file
name suffix.
""")
def main():
rdflib.extras.cmdlineutils.main(rdfs2dot, _help)
if __name__ == '__main__':
main()
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/embeddedRDF.py | # -*- coding: utf-8 -*-
"""
Extracting possible embedded RDF/XML content from the file and parsing it separately into the Graph. This is used, for example
by U{SVG 1.2 Tiny<http://www.w3.org/TR/SVGMobile12/>}.
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: <NAME>, <EMAIL>
@version: $Id: embeddedRDF.py,v 1.15 2012/11/16 17:51:53 ivan Exp $
"""
# Python 3 foolproof way...
try :
from io import StringIO
except :
from StringIO import StringIO
from .host import HostLanguage, accept_embedded_rdf_xml, accept_embedded_turtle
from .utils import return_XML
import re, sys
def handle_embeddedRDF(node, graph, state) :
"""
Handles embedded RDF. There are two possibilities:
- the file is one of the XML dialects that allows for an embedded RDF/XML portion. See the L{host.accept_embedded_rdf_xml} for those (a typical example is SVG).
- the file is HTML and there is a turtle portion in the C{<script>} element with type text/turtle.
@param node: a DOM node for the top level element
@param graph: target rdf graph
@type graph: RDFLib's Graph object instance
@param state: the inherited state (namespaces, lang, etc)
@type state: L{state.ExecutionContext}
@return: whether RDF/XML or Turtle content has been detected. If TRUE, RDFa processing should not occur on the node and its descendants.
@rtype: Boolean
"""
#def _get_prefixes_in_turtle() :
# retval = ""
# for key in state.term_or_curie.ns :
# retval += "@prefix %s: <%s> .\n" % (key, state.term_or_curie.ns[key])
# retval += '\n'
# return retval
# This feature is optional!
def _get_literal(Pnode):
"""
Get the full text
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType in [node.TEXT_NODE, node.CDATA_SECTION_NODE] :
rc = rc + node.data
# Sigh... the HTML5 parser does not recognize the CDATA escapes, ie, it just passes on the <![CDATA[ and ]]> strings:-(
return rc.replace("<![CDATA[","").replace("]]>","")
if state.options.embedded_rdf :
# Embedded turtle, per the latest Turtle draft
if state.options.host_language in accept_embedded_turtle and node.nodeName.lower() == "script" :
if node.hasAttribute("type") and node.getAttribute("type") == "text/turtle" :
#prefixes = _get_prefixes_in_turtle()
#content = _get_literal(node)
#rdf = StringIO(prefixes + content)
content = _get_literal(node)
rdf = StringIO(content)
try :
graph.parse(rdf, format="n3", publicID = state.base)
state.options.add_info("The output graph includes triples coming from an embedded Turtle script")
except :
(type,value,traceback) = sys.exc_info()
state.options.add_error("Embedded Turtle content could not be parsed (problems with %s?); ignored" % value)
return True
elif state.options.host_language in accept_embedded_rdf_xml and node.localName == "RDF" and node.namespaceURI == "http://www.w3.org/1999/02/22-rdf-syntax-ns#" :
rdf = StringIO(return_XML(state, node))
try :
graph.parse(rdf)
state.options.add_info("The output graph includes triples coming from an embedded RDF/XML subtree")
except :
(type,value,traceback) = sys.exc_info()
state.options.add_error("Embedded RDF/XML content could not parsed (problems with %s?); ignored" % value)
return True
else :
return False
else :
return False
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/sparql/compat.py | """
Functions/methods to help support Python 2.5-2.7
"""
# Collection ABCs
try:
from collections import Mapping, MutableMapping # was added in 2.6
except:
from UserDict import DictMixin
class MutableMapping(DictMixin):
def keys(self):
return list(self)
Mapping = MutableMapping
# OrderedDict
try:
from collections import OrderedDict # was added in 2.7
except ImportError:
from ordereddict import OrderedDict # extra module
import sys
if sys.version_info[:2] < (2, 7):
from decimal import Decimal
# Pre-2.7 decimal and float did not compare correctly
def _numericKey(n):
if isinstance(n, Decimal):
return float(n)
else:
return n
def num_max(*args, **kwargs):
kwargs["key"] = _numericKey
return max(*args, **kwargs)
def num_min(*args, **kwargs):
kwargs["key"] = _numericKey
return min(*args, **kwargs)
else:
num_max = max
num_min = min
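# Hedged illustration (not part of the original module): num_max/num_min behave like
# the built-ins, but compare Decimal and float consistently on pre-2.7 interpreters.
#
#   from decimal import Decimal
#   num_max(Decimal("2"), 1.5, 3)   # -> 3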
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/trix.py | """
A TriX parser for RDFLib
"""
from rdflib.namespace import Namespace
from rdflib.term import URIRef
from rdflib.term import BNode
from rdflib.term import Literal
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.exceptions import ParserError
from rdflib.parser import Parser
from xml.sax.saxutils import handler
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
__all__ = ['create_parser', 'TriXHandler', 'TriXParser']
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
XMLNS = Namespace("http://www.w3.org/XML/1998/namespace")
class TriXHandler(handler.ContentHandler):
"""An Sax Handler for TriX. See http://sw.nokia.com/trix/"""
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
self.reset()
def reset(self):
self.bnode = {}
self.graph = None
self.triple = None
self.state = 0
self.lang = None
self.datatype = None
# ContentHandler methods
def setDocumentLocator(self, locator):
self.locator = locator
def startDocument(self):
pass
def startPrefixMapping(self, prefix, namespace):
pass
def endPrefixMapping(self, prefix):
pass
def startElementNS(self, name, qname, attrs):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
% (name[0], TRIXNS))
if name[1] == "TriX":
if self.state == 0:
self.state = 1
else:
self.error("Unexpected TriX element")
elif name[1] == "graph":
if self.state == 1:
self.state = 2
else:
self.error("Unexpected graph element")
elif name[1] == "uri":
if self.state == 2:
# the context uri
self.state = 3
elif self.state == 4:
# part of a triple
pass
else:
self.error("Unexpected uri element")
elif name[1] == "triple":
if self.state == 2:
if self.graph is None:
# anonymous graph, create one with random bnode id
self.graph = Graph(store=self.store)
# start of a triple
self.triple = []
self.state = 4
else:
self.error("Unexpected triple element")
elif name[1] == "typedLiteral":
if self.state == 4:
# part of triple
self.lang = None
self.datatype = None
try:
self.lang = attrs.getValue((unicode(XMLNS), u"lang"))
except:
# language not required - ignore
pass
try:
self.datatype = attrs.getValueByQName(u"datatype")
except KeyError:
self.error("No required attribute 'datatype'")
else:
self.error("Unexpected typedLiteral element")
elif name[1] == "plainLiteral":
if self.state == 4:
# part of triple
self.lang = None
self.datatype = None
try:
self.lang = attrs.getValue((unicode(XMLNS), u"lang"))
except:
# language not required - ignore
pass
else:
self.error("Unexpected plainLiteral element")
elif name[1] == "id":
if self.state == 2:
# the context uri
self.state = 3
elif self.state == 4:
# part of triple
pass
else:
self.error("Unexpected id element")
else:
self.error("Unknown element %s in TriX namespace" % name[1])
self.chars = ""
def endElementNS(self, name, qname):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
% (name[0], TRIXNS))
if name[1] == "uri":
if self.state == 3:
self.graph = Graph(store=self.store,
identifier=URIRef(self.chars.strip()))
self.state = 2
elif self.state == 4:
self.triple += [URIRef(self.chars.strip())]
else:
self.error(
"Illegal internal self.state - This should never " +
"happen if the SAX parser ensures XML syntax correctness")
elif name[1] == "id":
if self.state == 3:
self.graph = Graph(self.store, identifier=self.get_bnode(
self.chars.strip()))
self.state = 2
elif self.state == 4:
self.triple += [self.get_bnode(self.chars.strip())]
else:
self.error(
"Illegal internal self.state - This should never " +
"happen if the SAX parser ensures XML syntax correctness")
elif name[1] == "plainLiteral" or name[1] == "typedLiteral":
if self.state == 4:
self.triple += [Literal(
self.chars, lang=self.lang, datatype=self.datatype)]
else:
self.error(
"This should never happen if the SAX parser " +
"ensures XML syntax correctness")
elif name[1] == "triple":
if self.state == 4:
if len(self.triple) != 3:
self.error("Triple has wrong length, got %d elements: %s" %
(len(self.triple), self.triple))
self.graph.add(self.triple)
# self.store.store.add(self.triple,context=self.graph)
# self.store.addN([self.triple+[self.graph]])
self.state = 2
else:
self.error(
"This should never happen if the SAX parser " +
"ensures XML syntax correctness")
elif name[1] == "graph":
self.graph = None
self.state = 1
elif name[1] == "TriX":
self.state = 0
else:
self.error("Unexpected close element")
def get_bnode(self, label):
if self.preserve_bnode_ids:
bn = BNode(label)
else:
if label in self.bnode:
bn = self.bnode[label]
else:
bn = BNode(label)
self.bnode[label] = bn
return bn
def characters(self, content):
self.chars += content
def ignorableWhitespace(self, content):
pass
def processingInstruction(self, target, data):
pass
def error(self, message):
locator = self.locator
info = "%s:%s:%s: " % (
locator.getSystemId(),
locator.getLineNumber(),
locator.getColumnNumber())
raise ParserError(info + message)
def create_parser(store):
parser = make_parser()
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
parser.start_namespace_decl(
"xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
trix = TriXHandler(store)
parser.setContentHandler(trix)
parser.setErrorHandler(ErrorHandler())
return parser
class TriXParser(Parser):
"""A parser for TriX. See http://sw.nokia.com/trix/"""
def __init__(self):
pass
def parse(self, source, sink, **args):
assert sink.store.context_aware, (
"TriXParser must be given a context aware store.")
self._parser = create_parser(sink.store)
content_handler = self._parser.getContentHandler()
preserve_bnode_ids = args.get("preserve_bnode_ids", None)
if preserve_bnode_ids is not None:
content_handler.preserve_bnode_ids = preserve_bnode_ids
# We're only using it once now
# content_handler.reset()
# self._parser.reset()
self._parser.parse(source)
|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/test_serializer.py | <reponame>DmPo/Schemaorg_CivicOS
import os
import unittest
from support import html5lib_test_files
try:
import json
except ImportError:
import simplejson as json
import html5lib
from html5lib import html5parser, serializer, constants
from html5lib.treewalkers._base import TreeWalker
optionals_loaded = []
try:
from lxml import etree
optionals_loaded.append("lxml")
except ImportError:
pass
default_namespace = constants.namespaces["html"]
class JsonWalker(TreeWalker):
def __iter__(self):
for token in self.tree:
type = token[0]
if type == "StartTag":
if len(token) == 4:
namespace, name, attrib = token[1:4]
else:
namespace = default_namespace
name, attrib = token[1:3]
yield self.startTag(namespace, name, self._convertAttrib(attrib))
elif type == "EndTag":
if len(token) == 3:
namespace, name = token[1:3]
else:
namespace = default_namespace
name = token[1]
yield self.endTag(namespace, name)
elif type == "EmptyTag":
if len(token) == 4:
namespace, name, attrib = token[1:]
else:
namespace = default_namespace
name, attrib = token[1:]
for token in self.emptyTag(namespace, name, self._convertAttrib(attrib)):
yield token
elif type == "Comment":
yield self.comment(token[1])
elif type in ("Characters", "SpaceCharacters"):
for token in self.text(token[1]):
yield token
elif type == "Doctype":
if len(token) == 4:
yield self.doctype(token[1], token[2], token[3])
elif len(token) == 3:
yield self.doctype(token[1], token[2])
else:
yield self.doctype(token[1])
else:
raise ValueError("Unknown token type: " + type)
def _convertAttrib(self, attribs):
"""html5lib tree-walkers use a dict of (namespace, name): value for
attributes, but JSON cannot represent this. Convert from the format
in the serializer tests (a list of dicts with "namespace", "name",
and "value" as keys) to html5lib's tree-walker format."""
attrs = {}
for attrib in attribs:
name = (attrib["namespace"], attrib["name"])
assert(name not in attrs)
attrs[name] = attrib["value"]
return attrs
def serialize_html(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.HTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def serialize_xhtml(input, options):
options = dict([(str(k),v) for k,v in options.iteritems()])
return serializer.XHTMLSerializer(**options).render(JsonWalker(input),options.get("encoding",None))
def make_test(input, expected, xhtml, options):
result = serialize_html(input, options)
if len(expected) == 1:
assert expected[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:False\n%s"%(expected[0], result, str(options))
elif result not in expected:
assert False, "Expected: %s, Received: %s" % (expected, result)
if not xhtml:
return
result = serialize_xhtml(input, options)
if len(xhtml) == 1:
assert xhtml[0] == result, "Expected:\n%s\nActual:\n%s\nOptions\nxhtml:True\n%s"%(xhtml[0], result, str(options))
elif result not in xhtml:
assert False, "Expected: %s, Received: %s" % (xhtml, result)
class EncodingTestCase(unittest.TestCase):
def throwsWithLatin1(self, input):
self.assertRaises(UnicodeEncodeError, serialize_html, input, {"encoding": "iso-8859-1"})
def testDoctypeName(self):
self.throwsWithLatin1([["Doctype", u"\u0101"]])
def testDoctypePublicId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"\u0101"]])
def testDoctypeSystemId(self):
self.throwsWithLatin1([["Doctype", u"potato", u"potato", u"\u0101"]])
def testCdataCharacters(self):
self.assertEquals("<style>ā", serialize_html([["StartTag", "http://www.w3.org/1999/xhtml", "style", {}],
["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testCharacters(self):
self.assertEquals("ā", serialize_html([["Characters", u"\u0101"]],
{"encoding": "iso-8859-1"}))
def testStartTagName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testEmptyTagName(self):
self.throwsWithLatin1([["EmptyTag", u"http://www.w3.org/1999/xhtml", u"\u0101", []]])
def testAttributeName(self):
self.throwsWithLatin1([["StartTag", u"http://www.w3.org/1999/xhtml", u"span", [{"namespace": None, "name": u"\u0101", "value": u"potato"}]]])
def testAttributeValue(self):
self.assertEquals("<span potato=ā>", serialize_html([["StartTag", u"http://www.w3.org/1999/xhtml", u"span",
[{"namespace": None, "name": u"potato", "value": u"\u0101"}]]],
{"encoding": "iso-8859-1"}))
def testEndTagName(self):
self.throwsWithLatin1([["EndTag", u"http://www.w3.org/1999/xhtml", u"\u0101"]])
def testComment(self):
self.throwsWithLatin1([["Comment", u"\u0101"]])
if "lxml" in optionals_loaded:
class LxmlTestCase(unittest.TestCase):
def setUp(self):
self.parser = etree.XMLParser(resolve_entities=False)
self.treewalker = html5lib.getTreeWalker("lxml")
self.serializer = serializer.HTMLSerializer()
def testEntityReplacement(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>\u03B2</html>""", result)
def testEntityXML(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>></html>""", result)
def testEntityNoResolve(self):
doc = """<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>"""
tree = etree.fromstring(doc, parser = self.parser).getroottree()
result = serializer.serialize(tree, tree="lxml", omit_optional_tags=False,
resolve_entities=False)
self.assertEquals(u"""<!DOCTYPE html SYSTEM "about:legacy-compat"><html>β</html>""", result)
def test_serializer():
for filename in html5lib_test_files('serializer', '*.test'):
tests = json.load(file(filename))
test_name = os.path.basename(filename).replace('.test','')
for index, test in enumerate(tests['tests']):
xhtml = test.get("xhtml", test["expected"])
if test_name == 'optionaltags':
xhtml = None
yield make_test, test["input"], test["expected"], xhtml, test.get("options", {})
|
DmPo/Schemaorg_CivicOS | lib/rdflib/extras/describer.py | <reponame>DmPo/Schemaorg_CivicOS
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
from rdflib import py3compat
__doc__ = py3compat.format_doctest_out("""
A Describer is a stateful utility for creating RDF statements in a
semi-declarative manner. It has methods for creating literal values, rel and
rev resource relations (somewhat resembling RDFa).
The `rel` and ``rev`` methods return a context manager which sets the current
about to the referenced resource for the context scope (for use with the
``with`` statement).
Full example in the ``to_rdf`` method below::
>>> import datetime
>>> from rdflib.graph import Graph
>>> from rdflib.namespace import Namespace, RDFS, FOAF
>>>
>>> ORG_URI = "http://example.org/"
>>>
>>> CV = Namespace("http://purl.org/captsolo/resume-rdf/0.2/cv#")
>>>
>>> class Person(object):
... def __init__(self):
... self.first_name = %(u)s"Some"
... self.last_name = %(u)s"Body"
... self.username = "some1"
... self.presentation = %(u)s"Just a Python & RDF hacker."
... self.image = "/images/persons/" + self.username + ".jpg"
... self.site = "http://example.net/"
... self.start_date = datetime.date(2009, 9, 4)
... def get_full_name(self):
... return %(u)s" ".join([self.first_name, self.last_name])
... def get_absolute_url(self):
... return "/persons/" + self.username
... def get_thumbnail_url(self):
... return self.image.replace('.jpg', '-thumb.jpg')
...
... def to_rdf(self):
... graph = Graph()
... graph.bind('foaf', FOAF)
... graph.bind('cv', CV)
... lang = 'en'
... d = Describer(graph, base=ORG_URI)
... d.about(self.get_absolute_url()+'#person')
... d.rdftype(FOAF.Person)
... d.value(FOAF.name, self.get_full_name())
... d.value(FOAF.firstName, self.first_name)
... d.value(FOAF.surname, self.last_name)
... d.rel(FOAF.homepage, self.site)
... d.value(RDFS.comment, self.presentation, lang=lang)
... with d.rel(FOAF.depiction, self.image):
... d.rdftype(FOAF.Image)
... d.rel(FOAF.thumbnail, self.get_thumbnail_url())
... with d.rev(CV.aboutPerson):
... d.rdftype(CV.CV)
... with d.rel(CV.hasWorkHistory):
... d.value(CV.startDate, self.start_date)
... d.rel(CV.employedIn, ORG_URI+"#company")
... return graph
...
>>> person_graph = Person().to_rdf()
>>> expected = Graph().parse(data='''<?xml version="1.0" encoding="utf-8"?>
... <rdf:RDF
... xmlns:foaf="http://xmlns.com/foaf/0.1/"
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:cv="http://purl.org/captsolo/resume-rdf/0.2/cv#"
... xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#">
... <foaf:Person rdf:about="http://example.org/persons/some1#person">
... <foaf:name>Some Body</foaf:name>
... <foaf:firstName>Some</foaf:firstName>
... <foaf:surname>Body</foaf:surname>
... <foaf:depiction>
... <foaf:Image
... rdf:about=
... "http://example.org/images/persons/some1.jpg">
... <foaf:thumbnail
... rdf:resource=
... "http://example.org/images/persons/some1-thumb.jpg"/>
... </foaf:Image>
... </foaf:depiction>
... <rdfs:comment xml:lang="en">
... Just a Python &amp; RDF hacker.
... </rdfs:comment>
... <foaf:homepage rdf:resource="http://example.net/"/>
... </foaf:Person>
... <cv:CV>
... <cv:aboutPerson
... rdf:resource="http://example.org/persons/some1#person">
... </cv:aboutPerson>
... <cv:hasWorkHistory>
... <rdf:Description>
... <cv:startDate
... rdf:datatype="http://www.w3.org/2001/XMLSchema#date"
... >2009-09-04</cv:startDate>
... <cv:employedIn rdf:resource="http://example.org/#company"/>
... </rdf:Description>
... </cv:hasWorkHistory>
... </cv:CV>
... </rdf:RDF>
... ''')
>>>
>>> from rdflib.compare import isomorphic
>>> isomorphic(person_graph, expected) #doctest: +SKIP
True
""")
from contextlib import contextmanager
from rdflib.graph import Graph
from rdflib.namespace import RDF
from rdflib.term import BNode
from rdflib.term import Identifier
from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.py3compat import format_doctest_out
class Describer(object):
def __init__(self, graph=None, about=None, base=None):
if graph is None:
graph = Graph()
self.graph = graph
self.base = base
self._subjects = []
self.about(about or None)
@format_doctest_out
def about(self, subject, **kws):
"""
Sets the current subject. Will convert the given object into an
``URIRef`` if it's not an ``Identifier``.
Usage::
>>> d = Describer()
>>> d._current() #doctest: +ELLIPSIS
rdflib.term.BNode(...)
>>> d.about("http://example.org/")
>>> d._current()
rdflib.term.URIRef(%(u)s'http://example.org/')
"""
kws.setdefault('base', self.base)
subject = cast_identifier(subject, **kws)
if self._subjects:
self._subjects[-1] = subject
else:
self._subjects.append(subject)
@format_doctest_out
def value(self, p, v, **kws):
"""
Set a literal value for the given property. Will cast the value to an
``Literal`` if a plain literal is given.
Usage::
>>> from rdflib import URIRef
>>> from rdflib.namespace import RDF, RDFS
>>> d = Describer(about="http://example.org/")
>>> d.value(RDFS.label, "Example")
>>> d.graph.value(URIRef('http://example.org/'), RDFS.label)
rdflib.term.Literal(%(u)s'Example')
"""
v = cast_value(v, **kws)
self.graph.add((self._current(), p, v))
@format_doctest_out
def rel(self, p, o=None, **kws):
"""Set an object for the given property. Will convert the given object
into an ``URIRef`` if it's not an ``Identifier``. If none is given, a
new ``BNode`` is used.
Returns a context manager for use in a ``with`` block, within which the
given object is used as current subject.
Usage::
>>> from rdflib import URIRef
>>> from rdflib.namespace import RDF, RDFS
>>> d = Describer(about="/", base="http://example.org/")
>>> _ctxt = d.rel(RDFS.seeAlso, "/about")
>>> d.graph.value(URIRef('http://example.org/'), RDFS.seeAlso)
rdflib.term.URIRef(%(u)s'http://example.org/about')
>>> with d.rel(RDFS.seeAlso, "/more"):
... d.value(RDFS.label, "More")
>>> (URIRef('http://example.org/'), RDFS.seeAlso,
... URIRef('http://example.org/more')) in d.graph
True
>>> d.graph.value(URIRef('http://example.org/more'), RDFS.label)
rdflib.term.Literal(%(u)s'More')
"""
kws.setdefault('base', self.base)
p = cast_identifier(p)
o = cast_identifier(o, **kws)
self.graph.add((self._current(), p, o))
return self._subject_stack(o)
@format_doctest_out
def rev(self, p, s=None, **kws):
"""
Same as ``rel``, but uses current subject as *object* of the relation.
The given resource is still used as subject in the returned context
manager.
Usage::
>>> from rdflib import URIRef
>>> from rdflib.namespace import RDF, RDFS
>>> d = Describer(about="http://example.org/")
>>> with d.rev(RDFS.seeAlso, "http://example.net/"):
... d.value(RDFS.label, "Net")
>>> (URIRef('http://example.net/'), RDFS.seeAlso,
... URIRef('http://example.org/')) in d.graph
True
>>> d.graph.value(URIRef('http://example.net/'), RDFS.label)
rdflib.term.Literal(%(u)s'Net')
"""
kws.setdefault('base', self.base)
p = cast_identifier(p)
s = cast_identifier(s, **kws)
self.graph.add((s, p, self._current()))
return self._subject_stack(s)
def rdftype(self, t):
"""
Shorthand for setting rdf:type of the current subject.
Usage::
>>> from rdflib import URIRef
>>> from rdflib.namespace import RDF, RDFS
>>> d = Describer(about="http://example.org/")
>>> d.rdftype(RDFS.Resource)
>>> (URIRef('http://example.org/'),
... RDF.type, RDFS.Resource) in d.graph
True
"""
self.graph.add((self._current(), RDF.type, t))
def _current(self):
return self._subjects[-1]
@contextmanager
def _subject_stack(self, subject):
self._subjects.append(subject)
yield None
self._subjects.pop()
def cast_value(v, **kws):
if not isinstance(v, Literal):
v = Literal(v, **kws)
return v
def cast_identifier(ref, **kws):
ref = ref or BNode()
if not isinstance(ref, Identifier):
ref = URIRef(ref, **kws)
return ref
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/rdfs/process.py | <reponame>DmPo/Schemaorg_CivicOS
# -*- coding: utf-8 -*-
"""
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: process.py,v 1.7 2012-03-23 14:06:38 ivan Exp $ $Date: 2012-03-23 14:06:38 $
"""
import sys
import os
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
ns_owl = Namespace("http://www.w3.org/2002/07/owl#")
from ..host import MediaTypes, xml_application_media_type
from ..utils import URIOpener
from . import err_outdated_cache
from . import err_unreachable_vocab
from . import err_unparsable_Turtle_vocab
from . import err_unparsable_xml_vocab
from . import err_unparsable_ntriples_vocab
from . import err_unparsable_rdfa_vocab
from . import err_unrecognised_vocab_type
from .. import VocabReferenceError
from .cache import CachedVocab
from .. import HTTPError, RDFaError
#############################################################################################################
def return_graph(uri, options, newCache = False) :
"""Parse a file, and return an RDFLib Graph. The URI's content type is checked and either one of
RDFLib's parsers is invoked (for the Turtle, RDF/XML, and N-Triples cases) or a separate RDFa processing is invoked
on the RDFa content.
The Accept header of the HTTP request gives a preference to Turtle, followed by RDF/XML and then HTML (RDFa), in case content negotiation is used.
This function is used to retrieve the vocabulary file and turn it into an RDFLib graph.
@param uri: URI for the graph
@param options: used as a place where warnings can be sent
@param newCache: in case this is used with caching, whether a new cache is generated; that modifies the warning text
@return: a tuple of (RDFLib Graph instance, expiration date); the graph is None if the dereferencing or the parsing was unsuccessful
"""
def return_to_cache(msg) :
if newCache :
options.add_warning(err_unreachable_vocab % uri, warning_type=VocabReferenceError)
else :
options.add_warning(err_outdated_cache % uri, warning_type=VocabReferenceError)
retval = None
expiration_date = None
content = None
try :
content = URIOpener(uri,
{'Accept' : 'text/html;q=0.8, application/xhtml+xml;q=0.8, text/turtle;q=1.0, application/rdf+xml;q=0.9'})
except HTTPError :
(type,value,traceback) = sys.exc_info()
return_to_cache(value)
return (None,None)
except RDFaError :
(type,value,traceback) = sys.exc_info()
return_to_cache(value)
return (None,None)
except Exception :
(type,value,traceback) = sys.exc_info()
return_to_cache(value)
return (None,None)
# Store the expiration date of the newly accessed data
expiration_date = content.expiration_date
if content.content_type == MediaTypes.turtle :
try :
retval = Graph()
retval.parse(content.data, format="n3")
except :
(type,value,traceback) = sys.exc_info()
options.add_warning(err_unparsable_Turtle_vocab % (uri,value))
elif content.content_type == MediaTypes.rdfxml :
try :
retval = Graph()
retval.parse(content.data)
except :
(type,value,traceback) = sys.exc_info()
options.add_warning(err_unparsable_xml_vocab % (uri,value))
elif content.content_type == MediaTypes.nt :
try :
retval = Graph()
retval.parse(content.data, format="nt")
except :
(type,value,traceback) = sys.exc_info()
options.add_warning(err_unparsable_ntriples_vocab % (uri,value))
elif content.content_type in [MediaTypes.xhtml, MediaTypes.html, MediaTypes.xml] or xml_application_media_type.match(content.content_type) != None :
try :
from pyRdfa import pyRdfa
from pyRdfa.options import Options
options = Options()
retval = pyRdfa(options).graph_from_source(content.data)
except :
(type,value,traceback) = sys.exc_info()
options.add_warning(err_unparsable_rdfa_vocab % (uri,value))
else :
options.add_warning(err_unrecognised_vocab_type % (uri, content.content_type))
return (retval, expiration_date)
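# Hedged usage sketch (not in the original): return_graph dereferences a vocabulary
# URI and hands back a (graph, expiration_date) pair; the pair is (None, None) when
# the document cannot be fetched, and the graph alone is None when parsing fails.
# The URI is an illustration; options is a pyRdfa Options instance.
#
#   vocab_graph, expires = return_graph("http://www.w3.org/ns/rdfa#", options)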
############################################################################################
type = ns_rdf["type"]
Property = ns_rdf["Property"]
Class = ns_rdfs["Class"]
subClassOf = ns_rdfs["subClassOf"]
subPropertyOf = ns_rdfs["subPropertyOf"]
equivalentProperty = ns_owl["equivalentProperty"]
equivalentClass = ns_owl["equivalentClass"]
class MiniOWL :
"""
Class implementing the simple OWL RL Reasoning required by RDFa in managing vocabulary files. This is done via
a forward chaining process (in the L{closure} method) using a few simple rules as defined by the RDF and the OWL Semantics
specifications.
@ivar graph: the graph that has to be expanded
@ivar added_triples: each cycle collects the triples that are to be added to the graph eventually.
@type added_triples: a set, to ensure the unicity of triples being added
"""
def __init__(self, graph, schema_semantics = False) :
self.graph = graph
self.added_triples = None
self.schema_semantics = schema_semantics
def closure(self) :
"""
Generate the closure of the graph. This is the real 'core'.
The processing rules store new triples via the L{separate method<store_triple>} which stores
them in the L{added_triples<added_triples>} set. If that set is empty at the end of a cycle,
it means that the whole process can be stopped.
"""
# Go cyclically through all rules until no change happens
new_cycle = True
cycle_num = 0
while new_cycle :
# yes, there was a change, let us go again
cycle_num += 1
# go through all rules, and collect the replies (to see whether any change has been done)
# the new triples to be added are collected separately not to interfere with
# the current graph yet
self.added_triples = set()
# Execute all the rules; these might fill up the added triples array
for t in self.graph : self.rules(t)
# Add the tuples to the graph (if necessary, that is). If any new triple has been generated, a new cycle
# will be necessary...
new_cycle = len(self.added_triples) > 0
for t in self.added_triples : self.graph.add(t)
def store_triple(self, t) :
"""
Despite its name, this does not yet add anything to the graph itself; it just stores the tuple in an
L{internal set<added_triples>}. (It is important for this to be a set: some of the rules in the various closures may
generate the same tuples several times.) Before adding the tuple to the set, the method checks whether
the tuple is in the final graph already (if yes, it is not added to the set).
The set itself is emptied at the start of every processing cycle; the triples are then effectively added to the
graph at the end of such a cycle. If the set is
actually empty at that point, this means that the cycle has not added any new triple, and the full processing can stop.
@param t: the triple to be added to the graph, unless it is already there
@type t: a 3-element tuple of (s,p,o)
"""
(s,p,o) = t
if t not in self.graph :
self.added_triples.add(t)
def rules(self, t) :
"""
Go through the OWL-RL entailment rules prp-spo1, prp-eqp1, prp-eqp2, cax-sco, cax-eqc1, and cax-eqc2 by extending the graph.
@param t: a triple (in the form of a tuple)
"""
s,p,o = t
if self.schema_semantics :
# extra reasoning on the vocabulary only to reduce the overall load by reducing the expected number of chaining cycles
if p == subPropertyOf :
for Z,Y,xxx in self.graph.triples((o, subPropertyOf, None)) :
self.store_triple((s,subPropertyOf,xxx))
elif p == equivalentProperty :
for Z,Y,xxx in self.graph.triples((o, equivalentProperty, None)) :
self.store_triple((s,equivalentProperty,xxx))
for xxx,Y,Z in self.graph.triples((None, equivalentProperty, s)) :
self.store_triple((xxx,equivalentProperty,o))
elif p == subClassOf :
for Z,Y,xxx in self.graph.triples((o, subClassOf, None)) :
self.store_triple((s,subClassOf,xxx))
elif p == equivalentClass :
for Z,Y,xxx in self.graph.triples((o, equivalentClass, None)) :
self.store_triple((s,equivalentClass,xxx))
for xxx,Y,Z in self.graph.triples((None, equivalentClass, s)) :
self.store_triple((xxx,equivalentClass,o))
else :
if p == subPropertyOf :
# prp-spo1
for zzz,Z,www in self.graph.triples((None, s, None)) :
self.store_triple((zzz, o, www))
elif p == equivalentProperty :
# prp-eqp1
for zzz,Z,www in self.graph.triples((None, s, None)) :
self.store_triple((zzz, o, www))
# prp-eqp2
for zzz,Z,www in self.graph.triples((None, o, None)) :
self.store_triple((zzz, s, www))
elif p == subClassOf :
# cax-sco
for vvv,Y,Z in self.graph.triples((None, type, s)) :
self.store_triple((vvv, type, o))
elif p == equivalentClass :
# cax-eqc1
for vvv,Y,Z in self.graph.triples((None, type, s)) :
self.store_triple((vvv, type, o))
# cax-eqc2
for vvv,Y,Z in self.graph.triples((None, type, o)) :
self.store_triple((vvv, type, s))
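# A hedged mini-example (not in the original) of the closure above, exercising the
# cax-sco rule: a subClassOf triple plus an rdf:type triple yields the inferred type.
# The example namespace is illustrative only.
#
#   from rdflib import Graph, Namespace, RDF, RDFS
#   EX = Namespace("http://example.org/")
#   g = Graph()
#   g.add((EX.Dog, RDFS.subClassOf, EX.Animal))
#   g.add((EX.spot, RDF.type, EX.Dog))
#   MiniOWL(g).closure()
#   assert (EX.spot, RDF.type, EX.Animal) in g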
########################################################################################################
def process_rdfa_sem(graph, options) :
"""
Expand the graph through the minimal RDFS and OWL rules defined for RDFa.
The expansion is done in several steps:
1. the vocabularies are retrieved from the incoming graph (there are RDFa triples generated for that)
2. all vocabularies are merged into a separate vocabulary graph
3. the RDFS/OWL expansion is done on the vocabulary graph, to take care of all the subproperty, subclass, etc, chains
4. the (expanded) vocabulary graph content is added to the incoming graph
5. the incoming graph is expanded
6. the triples appearing in the vocabulary graph are removed from the incoming graph, to avoid unnecessary extra triples from the data
@param graph: an RDFLib Graph instance, to be expanded
@param options: options as defined for the RDFa run; used to generate warnings
@type options: L{pyRdfa.Options}
"""
# 1. collect the vocab URI-s
vocabs = set()
from pyRdfa import RDFA_VOCAB
for ((s,p,v)) in graph.triples((None,RDFA_VOCAB,None)) :
vocabs.add((str(v)))
if len(vocabs) > 0 :
# 2. get all the vocab graphs
vocab_graph = Graph()
for uri in vocabs :
if options.vocab_cache :
v_graph = CachedVocab(uri, options).graph
else :
(v_graph, exp_date) = return_graph(uri, options)
if v_graph != None :
for t in v_graph :
vocab_graph.add(t)
# 3. Get the closure of the vocab graph; this will take care of local subproperty, etc, statements
# Strictly speaking this is not necessary, but will speed up processing, because it may save chaining cycles on the
# real graph
MiniOWL(vocab_graph, schema_semantics = True).closure()
# 4. Now get the vocab graph content added to the default graph
for t in vocab_graph :
graph.add(t)
# 5. get the graph expanded through RDFS
MiniOWL(graph).closure()
# 6. clean up the graph by removing the schema triples
for t in vocab_graph : graph.remove(t)
# That was it...
return graph
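# Hedged usage sketch (not part of the original file): process_rdfa_sem is normally
# invoked by the distiller when vocabulary expansion is switched on, but it can be
# applied to any graph that carries rdfa:usesVocabulary triples. Options below is
# pyRdfa's Options class; the graph variable stands for RDFa distiller output.
#
#   from pyRdfa.options import Options
#   expanded = process_rdfa_sem(rdfa_output_graph, Options())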
|
DmPo/Schemaorg_CivicOS | lib/rdflib/parser.py | <gh_stars>1-10
"""
Parser plugin interface.
This module defines the parser plugin interface and contains other
related parser support code.
The module is mainly useful for those wanting to write a parser that
can plugin to rdflib. If you are wanting to invoke a parser you likely
want to do so through the Graph class parse method.
"""
import os
import sys
from urllib import pathname2url, url2pathname
from urllib2 import urlopen, Request
from urlparse import urljoin
from rdflib.py3compat import PY3
if PY3:
from io import BytesIO
assert BytesIO
else:
from StringIO import StringIO as BytesIO
from xml.sax import xmlreader
from rdflib import __version__
from rdflib.term import URIRef
from rdflib.namespace import Namespace
__all__ = [
'Parser', 'InputSource', 'StringInputSource',
'URLInputSource', 'FileInputSource']
class Parser(object):
def __init__(self):
pass
def parse(self, source, sink):
pass
class InputSource(xmlreader.InputSource, object):
"""
TODO:
"""
def __init__(self, system_id=None):
xmlreader.InputSource.__init__(self, system_id=system_id)
self.content_type = None
class StringInputSource(InputSource):
"""
TODO:
"""
def __init__(self, value, system_id=None):
super(StringInputSource, self).__init__(system_id)
stream = BytesIO(value)
self.setByteStream(stream)
# TODO:
# encoding = value.encoding
# self.setEncoding(encoding)
headers = {
'User-agent':
'rdflib-%s (http://rdflib.net/; <EMAIL>)' % __version__
}
class URLInputSource(InputSource):
"""
TODO:
"""
def __init__(self, system_id=None, format=None):
super(URLInputSource, self).__init__(system_id)
self.url = system_id
# copy headers to change
myheaders = dict(headers)
if format == 'application/rdf+xml':
myheaders['Accept'] = 'application/rdf+xml, */*;q=0.1'
elif format == 'n3':
myheaders['Accept'] = 'text/n3, */*;q=0.1'
elif format == 'nt':
myheaders['Accept'] = 'text/plain, */*;q=0.1'
elif format == 'json-ld':
myheaders['Accept'] = (
'application/ld+json, application/json;q=0.9, */*;q=0.1')
else:
myheaders['Accept'] = (
'application/rdf+xml,text/rdf+n3;q=0.9,' +
'application/xhtml+xml;q=0.5, */*;q=0.1')
req = Request(system_id, None, myheaders)
file = urlopen(req)
# Fix for issue 130 https://github.com/RDFLib/rdflib/issues/130
self.url = file.geturl() # in case redirections took place
self.setPublicId(self.url)
self.content_type = file.info().get('content-type')
if self.content_type is not None:
self.content_type = self.content_type.split(";", 1)[0]
self.setByteStream(file)
# TODO: self.setEncoding(encoding)
self.response_info = file.info() # a mimetools.Message instance
def __repr__(self):
return self.url
class FileInputSource(InputSource):
def __init__(self, file):
base = urljoin("file:", pathname2url(os.getcwd()))
system_id = URIRef(urljoin("file:", pathname2url(file.name)), base=base)
super(FileInputSource, self).__init__(system_id)
self.file = file
self.setByteStream(file)
# TODO: self.setEncoding(encoding)
def __repr__(self):
return repr(self.file)
def create_input_source(source=None, publicID=None,
location=None, file=None, data=None, format=None):
"""
Return an appropriate InputSource instance for the given
parameters.
"""
# TODO: test that exactly one of source, location, file, and data
# is not None.
input_source = None
if source is not None:
if isinstance(source, InputSource):
input_source = source
else:
if isinstance(source, basestring):
location = source
elif hasattr(source, "read") and not isinstance(source, Namespace):
f = source
input_source = InputSource()
input_source.setByteStream(f)
if f is sys.stdin:
input_source.setSystemId("file:///dev/stdin")
elif hasattr(f, "name"):
input_source.setSystemId(f.name)
else:
raise Exception("Unexpected type '%s' for source '%s'" %
(type(source), source))
absolute_location = None # Further to fix for issue 130
if location is not None:
# Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145
if os.path.exists(location):
location = pathname2url(location)
base = urljoin("file:", "%s/" % pathname2url(os.getcwd()))
absolute_location = URIRef(location, base=base).defrag()
if absolute_location.startswith("file:///"):
filename = url2pathname(absolute_location.replace("file:///", "/"))
file = open(filename, "rb")
else:
input_source = URLInputSource(absolute_location, format)
# publicID = publicID or absolute_location # Further to fix
# for issue 130
if file is not None:
input_source = FileInputSource(file)
if data is not None:
if isinstance(data, unicode):
data = data.encode('utf-8')
input_source = StringInputSource(data)
if input_source is None:
raise Exception("could not create InputSource")
else:
if publicID is not None: # Further to fix for issue 130
input_source.setPublicId(publicID)
# Further to fix for issue 130
elif input_source.getPublicId() is None:
input_source.setPublicId(absolute_location or "")
return input_source
|
DmPo/Schemaorg_CivicOS | lib/rdflib/tools/graphisomorphism.py | <reponame>DmPo/Schemaorg_CivicOS
"""
A commandline tool for testing whether RDF graphs are isomorphic, i.e. equal
if BNode labels are ignored.
"""
from rdflib.graph import Graph
from rdflib import BNode
try:
from itertools import combinations
assert combinations
except ImportError: # Python == 2.5
# Copied from
# http://docs.python.org/2/library/itertools.html#itertools.combinations
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
class IsomorphicTestableGraph(Graph):
"""
Ported from:
http://www.w3.org/2001/sw/DataAccess/proto-tests/tools/rdfdiff.py
(<NAME>'s RDF Graph Isomorphism Tester)
"""
def __init__(self, **kargs):
super(IsomorphicTestableGraph, self).__init__(**kargs)
self.hash = None
def internal_hash(self):
"""
This is defined instead of __hash__ to avoid a circular recursion
scenario with the Memory store for rdflib which requires a hash
lookup in order to return a generator of triples
"""
return hash(tuple(sorted(self.hashtriples())))
def hashtriples(self):
for triple in self:
g = ((isinstance(t, BNode) and self.vhash(t)) or t for t in triple)
yield hash(tuple(g))
def vhash(self, term, done=False):
return tuple(sorted(self.vhashtriples(term, done)))
def vhashtriples(self, term, done):
for t in self:
if term in t:
yield tuple(self.vhashtriple(t, term, done))
def vhashtriple(self, triple, term, done):
for p in xrange(3):
if not isinstance(triple[p], BNode):
yield triple[p]
elif done or (triple[p] == term):
yield p
else:
yield self.vhash(triple[p], done=True)
def __eq__(self, G):
"""Graph isomorphism testing."""
if not isinstance(G, IsomorphicTestableGraph):
return False
elif len(self) != len(G):
return False
elif list.__eq__(list(self), list(G)):
return True # @@
return self.internal_hash() == G.internal_hash()
def __ne__(self, G):
"""Negative graph isomorphism testing."""
return not self.__eq__(G)
def main():
import sys
from optparse import OptionParser
usage = '''usage: %prog [options] file1 file2 ... fileN'''
op = OptionParser(usage=usage)
op.add_option('-s', '--stdin', action='store_true', default=False,
help='Load from STDIN as well')
op.add_option('--format',
default='xml',
dest='inputFormat',
metavar='RDF_FORMAT',
choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
help="The format of the RDF document(s) to compare" +
"One of 'xml','n3','trix', 'nt', " +
"or 'rdfa'. The default is %default")
(options, args) = op.parse_args()
graphs = []
graph2FName = {}
if options.stdin:
graph = IsomorphicTestableGraph().parse(
sys.stdin, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = '(STDIN)'
for fn in args:
graph = IsomorphicTestableGraph().parse(
fn, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = fn
checked = set()
for graph1, graph2 in combinations(graphs, 2):
if (graph1, graph2) not in checked and (graph2, graph1) not in checked:
assert graph1 == graph2, "%s != %s" % (
graph2FName[graph1], graph2FName[graph2])
if __name__ == '__main__':
main()
|
DmPo/Schemaorg_CivicOS | lib/isodate/tests/test_date.py | ##############################################################################
# Copyright 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Test cases for the isodate module.
'''
import unittest
from datetime import date
from isodate import parse_date, ISO8601Error, date_isoformat
from isodate import DATE_CENTURY, DATE_YEAR, DATE_MONTH
from isodate import DATE_EXT_COMPLETE, DATE_BAS_COMPLETE
from isodate import DATE_BAS_ORD_COMPLETE, DATE_EXT_ORD_COMPLETE
from isodate import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from isodate import DATE_EXT_WEEK, DATE_EXT_WEEK_COMPLETE
# the following list contains tuples of ISO date strings and the expected
# result from the parse_date method. A result of None means an ISO8601Error
# is expected. The test cases are grouped into dates with 4 digit years
# and 6 digit years.
TEST_CASES = {4: [('19', date(1901, 1, 1), DATE_CENTURY),
('1985', date(1985, 1, 1), DATE_YEAR),
('1985-04', date(1985, 4, 1), DATE_MONTH),
('1985-04-12', date(1985, 4, 12), DATE_EXT_COMPLETE),
('19850412', date(1985, 4, 12), DATE_BAS_COMPLETE),
('1985102', date(1985, 4, 12), DATE_BAS_ORD_COMPLETE),
('1985-102', date(1985, 4, 12), DATE_EXT_ORD_COMPLETE),
('1985W155', date(1985, 4, 12), DATE_BAS_WEEK_COMPLETE),
('1985-W15-5', date(1985, 4, 12), DATE_EXT_WEEK_COMPLETE),
('1985W15', date(1985, 4, 8), DATE_BAS_WEEK),
('1985-W15', date(1985, 4, 8), DATE_EXT_WEEK),
('1989-W15', date(1989, 4, 10), DATE_EXT_WEEK),
('1989-W15-5', date(1989, 4, 14), DATE_EXT_WEEK_COMPLETE),
('1-W1-1', None, DATE_BAS_WEEK_COMPLETE)],
6: [('+0019', date(1901, 1, 1), DATE_CENTURY),
('+001985', date(1985, 1, 1), DATE_YEAR),
('+001985-04', date(1985, 4, 1), DATE_MONTH),
('+001985-04-12', date(1985, 4, 12), DATE_EXT_COMPLETE),
('+0019850412', date(1985, 4, 12), DATE_BAS_COMPLETE),
('+001985102', date(1985, 4, 12), DATE_BAS_ORD_COMPLETE),
('+001985-102', date(1985, 4, 12), DATE_EXT_ORD_COMPLETE),
('+001985W155', date(1985, 4, 12), DATE_BAS_WEEK_COMPLETE),
('+001985-W15-5', date(1985, 4, 12), DATE_EXT_WEEK_COMPLETE),
('+001985W15', date(1985, 4, 8), DATE_BAS_WEEK),
('+001985-W15', date(1985, 4, 8), DATE_EXT_WEEK)]}
def create_testcase(yeardigits, datestring, expectation, format):
'''
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
TEST_CASES list, so that a failed test won't stop other tests.
'''
class TestDate(unittest.TestCase):
'''
A test case template to parse an ISO date string into a date
object.
'''
def test_parse(self):
'''
Parse an ISO date string and compare it to the expected value.
'''
if expectation is None:
self.assertRaises(ISO8601Error, parse_date, datestring,
yeardigits)
else:
result = parse_date(datestring, yeardigits)
self.assertEqual(result, expectation)
def test_format(self):
'''
Take date object and create ISO string from it.
This is the reverse test to test_parse.
'''
if expectation is None:
self.assertRaises(AttributeError,
date_isoformat, expectation, format,
yeardigits)
else:
self.assertEqual(date_isoformat(expectation, format,
yeardigits),
datestring)
return unittest.TestLoader().loadTestsFromTestCase(TestDate)
def test_suite():
'''
Construct a TestSuite instance for all test cases.
'''
suite = unittest.TestSuite()
for yeardigits, tests in TEST_CASES.items():
for datestring, expectation, format in tests:
suite.addTest(create_testcase(yeardigits, datestring,
expectation, format))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
DmPo/Schemaorg_CivicOS | lib/rdflib/compat.py | <gh_stars>1-10
#
# code to simplify supporting older python versions
#
import sys
from decimal import Decimal
if sys.version_info[:2] < (2, 7):
# Pre-2.7 decimal and float did not compare correctly
def numeric_greater(a, b):
if isinstance(a, Decimal) and isinstance(b, float):
return float(a) > b
elif isinstance(a, float) and isinstance(b, Decimal):
return a > float(b)
else:
return a > b
else:
def numeric_greater(a, b):
return a > b
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/serializers/nt.py | <filename>lib/rdflib/plugins/serializers/nt.py
"""
N-Triples RDF graph serializer for RDFLib.
See <http://www.w3.org/TR/rdf-testcases/#ntriples> for details about the
format.
"""
from rdflib.term import Literal
from rdflib.serializer import Serializer
from rdflib.py3compat import b
import warnings
__all__ = ['NTSerializer']
class NTSerializer(Serializer):
"""
Serializes RDF graphs to NTriples format.
"""
def serialize(self, stream, base=None, encoding=None, **args):
if base is not None:
warnings.warn("NTSerializer does not support base.")
if encoding is not None:
warnings.warn("NTSerializer does not use custom encoding.")
encoding = self.encoding
for triple in self.store:
stream.write(_nt_row(triple).encode(encoding, "replace"))
stream.write(b("\n"))
def _nt_row(triple):
if isinstance(triple[2], Literal):
return u"%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
_xmlcharref_encode(_quoteLiteral(triple[2])))
else:
return u"%s %s %s .\n" % (triple[0].n3(),
triple[1].n3(),
_xmlcharref_encode(triple[2].n3()))
def _quoteLiteral(l):
'''
a simpler version of term.Literal.n3()
'''
encoded = _quote_encode(l)
if l.language:
if l.datatype:
raise Exception("Literal has datatype AND language!")
return '%s@%s' % (encoded, l.language)
elif l.datatype:
return '%s^^<%s>' % (encoded, l.datatype)
else:
return '%s' % encoded
def _quote_encode(l):
return '"%s"' % l.replace('\\', '\\\\')\
.replace('\n', '\\n')\
.replace('"', '\\"')\
.replace('\r', '\\r')
# from <http://code.activestate.com/recipes/303668/>
def _xmlcharref_encode(unicode_data, encoding="ascii"):
"""Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
res = ""
# Step through the unicode_data string one character at a time in
# order to catch unencodable characters:
for char in unicode_data:
try:
char.encode(encoding, 'strict')
except UnicodeError:
if ord(char) <= 0xFFFF:
res += '\\u%04X' % ord(char)
else:
res += '\\U%08X' % ord(char)
else:
res += char
return res
|
DmPo/Schemaorg_CivicOS | lib/isodate/__init__.py | ##############################################################################
# Copyright 2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Import all essential functions and constants to re-export them here for easy
access.
This module also contains various pre-defined ISO 8601 format strings.
'''
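# Hedged example (not part of the original module) of the re-exported helpers; the
# values mirror the test cases used elsewhere in this package.
#
#   from isodate import parse_date, date_isoformat, DATE_EXT_COMPLETE
#   d = parse_date("1985-04-12")              # -> datetime.date(1985, 4, 12)
#   date_isoformat(d, DATE_EXT_COMPLETE)      # -> '1985-04-12'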
from isodate.isodates import parse_date, date_isoformat
from isodate.isotime import parse_time, time_isoformat
from isodate.isodatetime import parse_datetime, datetime_isoformat
from isodate.isoduration import parse_duration, duration_isoformat, Duration
from isodate.isoerror import ISO8601Error
from isodate.isotzinfo import parse_tzinfo, tz_isoformat
from isodate.tzinfo import UTC, FixedOffset, LOCAL
from isodate.duration import Duration
from isodate.isostrf import strftime
from isodate.isostrf import DATE_BAS_COMPLETE, DATE_BAS_ORD_COMPLETE
from isodate.isostrf import DATE_BAS_WEEK, DATE_BAS_WEEK_COMPLETE
from isodate.isostrf import DATE_CENTURY, DATE_EXT_COMPLETE
from isodate.isostrf import DATE_EXT_ORD_COMPLETE, DATE_EXT_WEEK
from isodate.isostrf import DATE_EXT_WEEK_COMPLETE, DATE_MONTH, DATE_YEAR
from isodate.isostrf import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from isodate.isostrf import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from isodate.isostrf import TIME_HOUR
from isodate.isostrf import TZ_BAS, TZ_EXT, TZ_HOUR
from isodate.isostrf import DT_BAS_COMPLETE, DT_EXT_COMPLETE
from isodate.isostrf import DT_BAS_ORD_COMPLETE, DT_EXT_ORD_COMPLETE
from isodate.isostrf import DT_BAS_WEEK_COMPLETE, DT_EXT_WEEK_COMPLETE
from isodate.isostrf import D_DEFAULT, D_WEEK, D_ALT_EXT, D_ALT_BAS
from isodate.isostrf import D_ALT_BAS_ORD, D_ALT_EXT_ORD
|
DmPo/Schemaorg_CivicOS | lib/rdflib_jsonld/context.py | # -*- coding: utf-8 -*-
"""
Implementation of the JSON-LD Context structure. See:
http://json-ld.org/
"""
from rdflib.namespace import RDF
from .util import source_to_json, urljoin, split_iri, norm_url
from .keys import (BASE, CONTAINER, CONTEXT, GRAPH, ID, INDEX, LANG, LIST,
REV, SET, TYPE, VALUE, VOCAB)
NODE_KEYS = set([LANG, ID, TYPE, VALUE, LIST, SET, REV, GRAPH])
class Defined(int): pass
UNDEF = Defined(0)
class Context(object):
def __init__(self, source=None, base=None):
self.language = None
self.vocab = None
self.base = base
self.doc_base = base
self.terms = {}
self._alias = {}
self._lookup = {}
self._prefixes = {}
self.active = False
if source:
self.load(source)
def load(self, source, base=None):
self.active = True
inputs = not isinstance(source, list) and [source] or source
sources = []
for source in inputs:
if isinstance(source, basestring):
url = urljoin(base, source)
#if url in visited_urls: continue
#visited_urls.append(url)
source = source_to_json(url)
if isinstance(source, dict):
if CONTEXT in source:
source = source[CONTEXT]
if isinstance(source, list):
sources.extend(source)
else:
sources.append(source)
for source in sources:
self._read_source(source)
def subcontext(self, source):
# IMPROVE: to optimize, implement SubContext with parent fallback support
ctx = Context()
ctx.language = self.language
ctx.vocab = self.vocab
ctx.base = self.base
ctx.doc_base = self.doc_base
ctx._alias = self._alias.copy()
ctx.terms = self.terms.copy()
ctx._lookup = self._lookup.copy()
ctx._prefixes = self._prefixes.copy()
ctx.load(source)
return ctx
def get_id(self, obj):
return self._get(obj, ID)
def get_type(self, obj):
return self._get(obj, TYPE)
def get_language(self, obj):
return self._get(obj, LANG)
def get_value(self, obj):
return self._get(obj, VALUE)
def get_graph(self, obj):
return self._get(obj, GRAPH)
def get_list(self, obj):
return self._get(obj, LIST)
def get_set(self, obj):
return self._get(obj, SET)
def get_rev(self, obj):
return self._get(obj, REV)
def _get(self, obj, key):
return obj.get(self._alias.get(key)) or obj.get(key)
def get_key(self, key):
return self._alias.get(key, key)
lang_key = property(lambda self: self.get_key(LANG))
id_key = property(lambda self: self.get_key(ID))
type_key = property(lambda self: self.get_key(TYPE))
value_key = property(lambda self: self.get_key(VALUE))
list_key = property(lambda self: self.get_key(LIST))
rev_key = property(lambda self: self.get_key(REV))
graph_key = property(lambda self: self.get_key(GRAPH))
def add_term(self, name, idref, coercion=UNDEF, container=UNDEF,
language=UNDEF, reverse=False):
term = Term(idref, name, coercion, container, language, reverse)
self.terms[name] = term
self._lookup[(idref, coercion or language, container, reverse)] = term
self._prefixes[idref] = name
def find_term(self, idref, coercion=None, container=UNDEF,
language=None, reverse=False):
lu = self._lookup
if coercion is None:
coercion = language
if coercion is not UNDEF and container:
found = lu.get((idref, coercion, container, reverse))
if found: return found
if coercion is not UNDEF:
found = lu.get((idref, coercion, UNDEF, reverse))
if found: return found
if container:
found = lu.get((idref, coercion, container, reverse))
if found: return found
elif language:
found = lu.get((idref, UNDEF, LANG, reverse))
if found: return found
else:
found = lu.get((idref, coercion or UNDEF, SET, reverse))
if found: return found
return lu.get((idref, UNDEF, UNDEF, reverse))
def resolve(self, curie_or_iri):
iri = self.expand(curie_or_iri, False)
if iri.startswith('_:'):
return iri
return self.resolve_iri(iri)
def resolve_iri(self, iri):
return norm_url(self.base, iri)
def expand(self, term_curie_or_iri, use_vocab=True):
if use_vocab:
term = self.terms.get(term_curie_or_iri)
if term:
return term.id
is_term, pfx, local = self._prep_expand(term_curie_or_iri)
if pfx == '_':
return term_curie_or_iri
if pfx is not None:
ns = self.terms.get(pfx)
if ns and ns.id:
return ns.id + local
elif is_term and use_vocab:
if self.vocab:
return self.vocab + term_curie_or_iri
return None
return self.resolve_iri(term_curie_or_iri)
def shrink_iri(self, iri):
ns, name = split_iri(unicode(iri))
pfx = self._prefixes.get(ns)
if pfx:
return u":".join((pfx, name))
return iri
def to_symbol(self, iri):
iri = unicode(iri)
term = self.find_term(iri)
if term:
return term.name
ns, name = split_iri(iri)
if ns == self.vocab:
return name
pfx = self._prefixes.get(ns)
if pfx:
return u":".join((pfx, name))
return iri
def _read_source(self, source):
self.vocab = source.get(VOCAB, self.vocab)
for key, value in source.items():
if key == LANG:
self.language = value
elif key == VOCAB:
continue
elif key == BASE:
# TODO: only base to None if source is embedded
#if value is None and remote:
# self.base = self.doc_base
#else:
self.base = value
else:
self._read_term(source, key, value)
def _read_term(self, source, name, dfn):
if isinstance(dfn, dict):
#term = self._create_term(source, key, value)
rev = dfn.get(REV)
idref = rev or dfn.get(ID, UNDEF)
if idref == TYPE:
idref = unicode(RDF.type)
elif idref is not UNDEF:
idref = self._rec_expand(source, idref)
elif ':' in name:
idref = self._rec_expand(source, name)
elif self.vocab:
idref = self.vocab + name
coercion = dfn.get(TYPE, UNDEF)
if coercion and coercion not in (ID, TYPE, VOCAB):
coercion = self._rec_expand(source, coercion)
self.add_term(name, idref, coercion,
dfn.get(CONTAINER, UNDEF), dfn.get(LANG, UNDEF), bool(rev))
else:
idref = self._rec_expand(source, dfn)
self.add_term(name, idref)
if idref in NODE_KEYS:
self._alias[idref] = name
def _rec_expand(self, source, expr, prev=None):
if expr == prev or expr in NODE_KEYS:
return expr
is_term, pfx, nxt = self._prep_expand(expr)
if pfx:
iri = self._get_source_id(source, pfx) or self.expand(pfx)
if iri is None:
nxt = expr
else:
nxt = iri + nxt
else:
nxt = self._get_source_id(source, nxt) or nxt
if ':' not in nxt and self.vocab:
return self.vocab + nxt
return self._rec_expand(source, nxt, expr)
def _prep_expand(self, expr):
if ':' not in expr:
return True, None, expr
pfx, local = expr.split(':', 1)
if not local.startswith('//'):
return False, pfx, local
else:
return False, None, expr
def _get_source_id(self, source, key):
# .. from source dict or if already defined
term = source.get(key)
if term is None:
dfn = self.terms.get(key)
if dfn:
term = dfn.id
elif isinstance(term, dict):
term = term.get(ID)
return term
class Term(object):
def __init__(self, idref, name, coercion=UNDEF, container=UNDEF,
language=UNDEF, reverse=False):
self.name = name
self.id = idref
self.type = coercion
self.container = container
self.language = language
self.reverse = reverse
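# Illustrative sketch of how the classes above fit together (assuming this module
# is imported as rdflib_jsonld.context; the values follow from the methods above):
#
#   ctx = Context({"dc": "http://purl.org/dc/terms/", "@vocab": "http://schema.org/"})
#   ctx.expand("dc:title")   # -> "http://purl.org/dc/terms/title"  (prefix lookup)
#   ctx.expand("name")       # -> "http://schema.org/name"          (@vocab fallback)
#   ctx.shrink_iri("http://purl.org/dc/terms/title")   # -> "dc:title"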
|
DmPo/Schemaorg_CivicOS | lib/rdflib_jsonld/parser.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This parser will interpret a JSON-LD document as an RDF Graph. See:
http://json-ld.org/
Example usage::
>>> from rdflib.plugin import register, Parser
>>> register('json-ld', Parser, 'rdflib_jsonld.parser', 'JsonLDParser')
>>> from rdflib import Graph, URIRef, Literal
>>> test_json = '''
... {
... "@context": {
... "dc": "http://purl.org/dc/terms/",
... "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
... "rdfs": "http://www.w3.org/2000/01/rdf-schema#"
... },
... "@id": "http://example.org/about",
... "dc:title": {
... "@language": "en",
... "@value": "Someone's Homepage"
... }
... }
... '''
>>> g = Graph().parse(data=test_json, format='json-ld')
>>> list(g) == [(URIRef('http://example.org/about'),
... URIRef('http://purl.org/dc/terms/title'),
... Literal("Someone's Homepage", lang='en'))]
True
"""
# NOTE: This code reads the entire JSON object into memory before parsing, but
# we should consider streaming the input to deal with arbitrarily large graphs.
import warnings
from rdflib.graph import ConjunctiveGraph
from rdflib.parser import Parser
from rdflib.namespace import RDF, XSD
from rdflib.term import URIRef, BNode, Literal
from .context import Context, Term, UNDEF
from .util import source_to_json, VOCAB_DELIMS
from .keys import CONTEXT, GRAPH, ID, INDEX, LANG, LIST, REV, SET, TYPE, VALUE, VOCAB
__all__ = ['JsonLDParser', 'to_rdf']
TYPE_TERM = Term(unicode(RDF.type), TYPE, VOCAB)
class JsonLDParser(Parser):
def __init__(self):
super(JsonLDParser, self).__init__()
def parse(self, source, sink, **kwargs):
# TODO: docstring w. args and return value
encoding = kwargs.get('encoding') or 'utf-8'
if encoding not in ('utf-8', 'utf-16'):
warnings.warn("JSON should be encoded as unicode. " +
"Given encoding was: %s" % encoding)
base = kwargs.get('base') or sink.absolutize(
source.getPublicId() or source.getSystemId() or "")
context_data = kwargs.get('context')
produce_generalized_rdf = kwargs.get('produce_generalized_rdf', False)
data = source_to_json(source)
conj_sink = ConjunctiveGraph(
store=sink.store, identifier=sink.identifier)
        to_rdf(data, conj_sink, base, context_data, produce_generalized_rdf)
generalized_rdf = False
def to_rdf(data, graph, base=None, context_data=None, produce_generalized_rdf=False):
# TODO: docstring w. args and return value
global generalized_rdf # FIXME: not thread-safe and error-prone
generalized_rdf = produce_generalized_rdf
context = Context(base=base)
if context_data:
context.load(context_data)
topcontext = False
if isinstance(data, list):
resources = data
elif isinstance(data, dict):
l_ctx = data.get(CONTEXT)
if l_ctx:
context.load(l_ctx, base)
topcontext = True
resources = data
if not isinstance(resources, list):
resources = [resources]
if context.vocab:
graph.bind(None, context.vocab)
for name, term in context.terms.items():
if term.id and term.id.endswith(VOCAB_DELIMS):
graph.bind(name, term.id)
for node in resources:
_add_to_graph(graph, graph, context, node, topcontext)
return graph
def _add_to_graph(dataset, graph, context, node, topcontext=False):
if not isinstance(node, dict) or context.get_value(node):
return
if CONTEXT in node and not topcontext:
l_ctx = node.get(CONTEXT)
if l_ctx:
context = context.subcontext(l_ctx)
else:
context = Context(base=context.doc_base)
id_val = context.get_id(node)
if isinstance(id_val, basestring):
subj = _to_rdf_id(context, id_val)
else:
subj = BNode()
if subj is None:
return None
for key, obj in node.items():
if key in (CONTEXT, ID, context.get_key(ID)):
continue
if key in (REV, context.get_key(REV)):
for rkey, robj in obj.items():
_key_to_graph(dataset, graph, context, subj, rkey, robj, True)
else:
_key_to_graph(dataset, graph, context, subj, key, obj)
return subj
def _key_to_graph(dataset, graph, context, subj, key, obj, reverse=False):
if isinstance(obj, list):
obj_nodes = obj
else:
obj_nodes = [obj]
term = context.terms.get(key)
if term:
term_id = term.id
if term.container == LIST:
obj_nodes = [{LIST: obj_nodes}]
elif isinstance(obj, dict):
if term.container == INDEX:
obj_nodes = []
for values in obj.values():
if not isinstance(values, list):
obj_nodes.append(values)
else:
obj_nodes += values
elif term.container == LANG:
obj_nodes = []
for lang, values in obj.items():
if not isinstance(values, list):
values = [values]
for v in values:
obj_nodes.append((v, lang))
else:
term_id = None
if TYPE in (key, term_id):
term = TYPE_TERM
elif GRAPH in (key, term_id):
#assert graph.context_aware
subgraph = dataset.get_context(subj)
for onode in obj_nodes:
_add_to_graph(dataset, subgraph, context, onode)
return
elif SET in (key, term_id):
for onode in obj_nodes:
_add_to_graph(dataset, graph, context, onode)
return
pred_uri = term.id if term else context.expand(key)
flattened = []
for obj in obj_nodes:
if isinstance(obj, dict):
objs = context.get_set(obj)
if objs is not None:
obj = objs
if isinstance(obj, list):
flattened += obj
continue
flattened.append(obj)
obj_nodes = flattened
if not pred_uri:
return
if term and term.reverse:
reverse = not reverse
bid = _get_bnodeid(pred_uri)
if bid:
if not generalized_rdf:
return
pred = BNode(bid)
else:
pred = URIRef(pred_uri)
for obj_node in obj_nodes:
obj = _to_object(dataset, graph, context, term, obj_node)
if obj is None:
continue
if reverse:
graph.add((obj, pred, subj))
else:
graph.add((subj, pred, obj))
def _to_object(dataset, graph, context, term, node, inlist=False):
if node is None:
return
if isinstance(node, tuple):
value, lang = node
if value is None:
return
return Literal(value, lang=lang)
if isinstance(node, dict):
node_list = context.get_list(node)
if node_list is not None:
if inlist: # TODO: and NO_LISTS_OF_LISTS
return
listref = _add_list(dataset, graph, context, term, node_list)
if listref:
return listref
else: # expand..
if not term or not term.type:
if isinstance(node, float):
return Literal(node, datatype=XSD.double)
if term and term.language is not UNDEF:
lang = term.language
else:
lang = context.language
return Literal(node, lang=lang)
else:
if term.type == ID:
node = {ID: context.resolve(node)}
elif term.type == VOCAB:
node = {ID: context.expand(node) or context.resolve_iri(node)}
else:
node = {TYPE: term.type,
VALUE: node}
lang = context.get_language(node)
if lang or context.get_key(VALUE) in node or VALUE in node:
value = context.get_value(node)
if value is None:
return None
datatype = not lang and context.get_type(node) or None
if lang:
return Literal(value, lang=lang)
elif datatype:
return Literal(value, datatype=context.expand(datatype))
else:
return Literal(value)
else:
return _add_to_graph(dataset, graph, context, node)
def _to_rdf_id(context, id_val):
bid = _get_bnodeid(id_val)
if bid:
return BNode(bid)
else:
uri = context.resolve(id_val)
if not generalized_rdf and ':' not in uri:
return None
return URIRef(uri)
def _get_bnodeid(ref):
if not ref.startswith('_:'):
return
bid = ref.split('_:', 1)[-1]
return bid or None
def _add_list(dataset, graph, context, term, node_list):
if not isinstance(node_list, list):
node_list = [node_list]
first_subj = BNode()
subj, rest = first_subj, None
for node in node_list:
if node is None:
continue
if rest:
graph.add((subj, RDF.rest, rest))
subj = rest
obj = _to_object(dataset, graph, context, term, node, inlist=True)
if obj is None:
continue
graph.add((subj, RDF.first, obj))
rest = BNode()
if rest:
graph.add((subj, RDF.rest, RDF.nil))
return first_subj
else:
return RDF.nil
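# Sketch of the triples _add_list emits for {"@list": ["a", "b"]}, with b0/b1
# standing in for the generated blank nodes (Turtle-ish notation):
#   b0 rdf:first "a" .  b0 rdf:rest b1 .
#   b1 rdf:first "b" .  b1 rdf:rest rdf:nil .
# An empty (or all-None) list resolves to rdf:nil instead of a blank node head.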
|
DmPo/Schemaorg_CivicOS | scripts/vocab_counts.py | <reponame>DmPo/Schemaorg_CivicOS
# The file containing the actual counts is not checked in.
# This file has the script for bucketizing the counts
import sys
def vocab_term (term) :
parts = term.split('/', 4)
if (len(parts) < 4) :
return None
domain = parts[2]
if (not (domain == "schema.org")) :
return None
else :
return parts[3]
counts = {}
def addCount (term, count) :
if (term in counts) :
counts[term] = counts[term] + count
else:
counts[term] = count
def bucket (vt, count) :
if (vt != None):
if (count < 10) :
return None
elif (count < 100) :
return "%s\t%i" % (vt, 1)
elif (count < 1000) :
return "%s\t%i" % (vt, 2)
elif (count < 10000) :
return "%s\t%i" % (vt, 3)
elif (count < 50000) :
return "%s\t%i" % (vt, 4)
elif (count < 100000) :
return "%s\t%i" % (vt, 5)
elif (count < 250000) :
return "%s\t%i" % (vt, 7)
elif (count < 500000) :
return "%s\t%i" % (vt, 8)
elif (count < 1000000) :
return "%s\t%i" % (vt, 9)
else:
return "%s\t%i" % (vt, 10)
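# Example: the input line "http://schema.org/Person, 123456" gives
# vocab_term("http://schema.org/Person") == "Person" and
# bucket("Person", 123456) == "Person\t7" (123456 falls in the 100000-250000 band).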
input_file = sys.argv[1]
if (input_file != None):
f = open(input_file)
if (f != None):
for line in f:
parts = line.strip().split(',')
if (len(parts) > 1):
term = parts[0]
count = 0
count_string = parts[1].replace(' ', '')
try:
                    count = int(count_string)
except:
count = 0
term = vocab_term(term)
addCount(term, count)
for term in sorted(counts.keys(), key= lambda term: counts[term], reverse=True):
            out_line = bucket(term, counts[term])
            if (out_line != None):
                print out_line
else:
print "Cannot open file " + input_file
|
DmPo/Schemaorg_CivicOS | lib/rdflib/query.py |
import os
import shutil
import tempfile
import warnings
import types
from urlparse import urlparse
try:
from io import BytesIO
assert BytesIO
except:
from StringIO import StringIO as BytesIO
from . import py3compat
__all__ = ['Processor', 'Result', 'ResultParser', 'ResultSerializer',
'ResultException']
class Processor(object):
"""
Query plugin interface.
This module is useful for those wanting to write a query processor
that can plugin to rdf. If you are wanting to execute a query you
likely want to do so through the Graph class query method.
"""
def __init__(self, graph):
pass
def query(self, strOrQuery, initBindings={}, initNs={}, DEBUG=False):
pass
class UpdateProcessor(object):
"""
Update plugin interface.
This module is useful for those wanting to write an update
processor that can plugin to rdflib. If you are wanting to execute
an update statement you likely want to do so through the Graph
class update method.
.. versionadded:: 4.0
"""
def __init__(self, graph):
pass
def update(self, strOrQuery, initBindings={}, initNs={}):
pass
class ResultException(Exception):
pass
class EncodeOnlyUnicode(object):
"""
This is a crappy work-around for
http://bugs.python.org/issue11649
"""
def __init__(self, stream):
self.__stream = stream
def write(self, arg):
if isinstance(arg, unicode):
self.__stream.write(arg.encode("utf-8"))
else:
self.__stream.write(arg)
def __getattr__(self, name):
return getattr(self.__stream, name)
class ResultRow(tuple):
"""
a single result row
allows accessing bindings as attributes or with []
>>> from rdflib import URIRef, Variable
>>> rr=ResultRow({ Variable('a'): URIRef('urn:cake') }, [Variable('a')])
>>> rr[0]
rdflib.term.URIRef(%(u)s'urn:cake')
>>> rr[1]
Traceback (most recent call last):
...
IndexError: tuple index out of range
>>> rr.a
rdflib.term.URIRef(%(u)s'urn:cake')
>>> rr.b
Traceback (most recent call last):
...
AttributeError: b
>>> rr['a']
rdflib.term.URIRef(%(u)s'urn:cake')
>>> rr['b']
Traceback (most recent call last):
...
KeyError: 'b'
>>> rr[Variable('a')]
rdflib.term.URIRef(%(u)s'urn:cake')
.. versionadded:: 4.0
"""
__doc__ = py3compat.format_doctest_out(__doc__)
def __new__(cls, values, labels):
instance = super(ResultRow, cls).__new__(
cls, (values.get(v) for v in labels))
instance.labels = dict((unicode(x[1]), x[0])
for x in enumerate(labels))
return instance
def __getattr__(self, name):
if name not in self.labels:
raise AttributeError(name)
return tuple.__getitem__(self, self.labels[name])
def __getitem__(self, name):
try:
return tuple.__getitem__(self, name)
except TypeError:
if name in self.labels:
return tuple.__getitem__(self, self.labels[name])
if unicode(name) in self.labels: # passing in variable object
return tuple.__getitem__(self, self.labels[unicode(name)])
raise KeyError(name)
def asdict(self):
return dict((v, self[v]) for v in self.labels if self[v] != None)
class Result(object):
"""
A common class for representing query result.
There is a bit of magic here that makes this appear like different
Python objects, depending on the type of result.
If the type is "SELECT", iterating will yield lists of QueryRow objects
If the type is "ASK", iterating will yield a single bool (or
bool(result) will return the same bool)
If the type is "CONSTRUCT" or "DESCRIBE" iterating will yield the
triples.
len(result) also works.
"""
def __init__(self, type_):
if type_ not in ('CONSTRUCT', 'DESCRIBE', 'SELECT', 'ASK'):
raise ResultException('Unknown Result type: %s' % type_)
self.type = type_
self.vars = None
self._bindings = None
self._genbindings = None
self.askAnswer = None
self.graph = None
def _get_bindings(self):
if self._genbindings:
self._bindings += list(self._genbindings)
self._genbindings = None
return self._bindings
def _set_bindings(self, b):
if isinstance(b, types.GeneratorType):
self._genbindings = b
self._bindings = []
else:
self._bindings = b
bindings = property(
_get_bindings, _set_bindings, doc="a list of variable bindings as dicts")
@staticmethod
def parse(source, format='xml', **kwargs):
from rdflib import plugin
parser = plugin.get(format, ResultParser)()
return parser.parse(source, **kwargs)
def serialize(
self, destination=None, encoding="utf-8", format='xml', **args):
if self.type in ('CONSTRUCT', 'DESCRIBE'):
return self.graph.serialize(
destination, encoding=encoding, format=format, **args)
"""stolen wholesale from graph.serialize"""
from rdflib import plugin
serializer = plugin.get(format, ResultSerializer)(self)
if destination is None:
stream = BytesIO()
stream2 = EncodeOnlyUnicode(stream)
serializer.serialize(stream2, encoding=encoding, **args)
return stream.getvalue()
if hasattr(destination, "write"):
stream = destination
serializer.serialize(stream, encoding=encoding, **args)
else:
location = destination
scheme, netloc, path, params, query, fragment = urlparse(location)
if netloc != "":
                print("WARNING: not saving as location " +
"is not a local file reference")
return
fd, name = tempfile.mkstemp()
stream = os.fdopen(fd, 'wb')
serializer.serialize(stream, encoding=encoding, **args)
stream.close()
if hasattr(shutil, "move"):
shutil.move(name, path)
else:
shutil.copy(name, path)
os.remove(name)
def __len__(self):
if self.type == 'ASK':
return 1
elif self.type == 'SELECT':
return len(self.bindings)
else:
return len(self.graph)
def __nonzero__(self):
if self.type == 'ASK':
return self.askAnswer
else:
return len(self)>0
def __iter__(self):
if self.type in ("CONSTRUCT", "DESCRIBE"):
for t in self.graph:
yield t
elif self.type == 'ASK':
yield self.askAnswer
elif self.type == 'SELECT':
# this iterates over ResultRows of variable bindings
if self._genbindings:
for b in self._genbindings:
self._bindings.append(b)
yield ResultRow(b, self.vars)
self._genbindings = None
else:
for b in self._bindings:
yield ResultRow(b, self.vars)
def __getattr__(self, name):
if self.type in ("CONSTRUCT", "DESCRIBE") and self.graph is not None:
            return getattr(self.graph, name)
elif self.type == 'SELECT' and name == 'result':
warnings.warn(
"accessing the 'result' attribute is deprecated."
" Iterate over the object instead.",
DeprecationWarning, stacklevel=2)
# copied from __iter__, above
return [(tuple(b[v] for v in self.vars)) for b in self.bindings]
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self, name))
def __eq__(self, other):
try:
if self.type != other.type:
return False
if self.type == 'ASK':
return self.askAnswer == other.askAnswer
elif self.type == 'SELECT':
return self.vars == other.vars \
and self.bindings == other.bindings
else:
return self.graph == other.graph
except:
return False
class ResultParser(object):
def __init__(self):
pass
def parse(self, source, **kwargs):
"""return a Result object"""
pass # abstract
class ResultSerializer(object):
def __init__(self, result):
self.result = result
def serialize(self, stream, encoding="utf-8", **kwargs):
"""return a string properly serialized"""
pass # abstract
|
DmPo/Schemaorg_CivicOS | lib/html5lib/tests/runparsertests.py | import sys
import os
import glob
import unittest
#Allow us to import the parent module
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.curdir))
sys.path.insert(0, os.path.abspath(os.pardir))
sys.path.insert(0, os.path.join(os.path.abspath(os.pardir), "src"))
def buildTestSuite():
suite = unittest.TestSuite()
for testcase in glob.glob('test_*.py'):
if testcase in ("test_tokenizer.py", "test_parser.py", "test_parser2.py"):
module = os.path.splitext(testcase)[0]
suite.addTest(__import__(module).buildTestSuite())
return suite
def main():
results = unittest.TextTestRunner().run(buildTestSuite())
return results
if __name__ == "__main__":
results = main()
if not results.wasSuccessful():
sys.exit(1)
|
DiscordDigital/ui.py | ui.py | import curses
import collections
import sys
import os
from time import sleep
# File syntax
#
# When there's not enough space for all elements, the UI will go into scroll mode
#
# Syntax:
# script.py ui_example.txt
#
# an object is one line, split by ;
# The first part is the Name the second part is the shell action
# Use the sample file to tweak colors.
# Valid colors are: black, red, green, yellow, blue, magenta, cyan, white
# Also valid colors are black2, red2, green2.. those are usually brighter versions
#
# To run an inbuilt function, just use an action as follows:
# Show version;function:Show_version
#
# To implement a quit button, you can do it like this:
# Quit menu;quit
#
# For more information check out the github readme: https://github.com/DiscordDigital/ui.py/
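# A minimal example file (hypothetical; same structure as the sample_ui.txt
# generated by generate_sample_file below):
#
#   menutext=My Menu
#   maxh=3
#   Show date;date
#   Show Python version;function:Show_version
#   Quit;quit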
def RunInbuiltFunction(function_name):
if (function_name == "Show_version"):
print("Running python version " + sys.version)
def generate_sample_file():
sample_file = open('sample_ui.txt','w')
sample_file.write(
"""menutext=Sample UI!\nmaxh=3\ntitlecolor=white\nwindow_bg=blue\nobjcolor_text=white\nobjcolor_bg=blue\nobjcolor_sel_text=black\nobjcolor_sel_bg=white\nStart Nano;nano\nShow date;date\nCredits;echo Made by discord.digital\nShow Python version;function:Show_version\nQuit;quit"""
)
sample_file.close()
if len(sys.argv) != 2:
print("Specify ui file")
print("Get started by typing: " + sys.argv[0] + " sample")
exit()
elif (sys.argv[1] == "sample"):
generate_sample_file()
print("Created sample_ui.txt")
print("Use it like that: " + sys.argv[0] + " sample_ui.txt")
exit(0)
else:
if not os.path.isfile(sys.argv[1]):
print("File not found!")
exit()
screen = curses.initscr()
curses.curs_set(0)
curses.noecho()
screen.keypad(1)
curses.start_color()
curses.mousemask(1)
def convert_text_to_color(text):
textup = text.upper()
if (textup == "BLACK"):
return 0
if (textup == "RED"):
return 1
if (textup == "GREEN"):
return 2
if (textup == "YELLOW"):
return 3
if (textup == "BLUE"):
return 4
if (textup == "MAGENTA"):
return 5
if (textup == "CYAN"):
return 6
if (textup == "WHITE"):
return 7
if (textup == "BLACK2"):
return 8
if (textup == "RED2"):
return 9
if (textup == "GREEN2"):
return 10
if (textup == "YELLOW2"):
return 11
if (textup == "BLUE2"):
return 12
if (textup == "MAGENTA2"):
return 13
if (textup == "CYAN2"):
return 14
if (textup == "WHITE2"):
return 15
return 7
objects = collections.defaultdict(dict)
object_i = 0
menutext = "Menu"
maxh = 3
titlecolor = "white"
window_bg = "black"
objcolor_text = "white"
objcolor_bg = "black"
objcolor_sel_text = "black"
objcolor_sel_bg = "white"
fp = open(sys.argv[1])
for _, line in enumerate(fp):
if line.startswith("menutext="):
menutext = line.replace('menutext=','').replace('\n','')
elif line.startswith("maxh="):
maxh = line.replace('maxh=','').replace('\n','')
elif line.startswith("titlecolor="):
titlecolor = line.replace('titlecolor=','').replace('\n','')
elif line.startswith("window_bg="):
window_bg = line.replace('window_bg=','').replace('\n','')
elif line.startswith("objcolor_text="):
objcolor_text = line.replace('objcolor_text=','').replace('\n','')
elif line.startswith("objcolor_bg="):
objcolor_bg = line.replace('objcolor_bg=','').replace('\n','')
elif line.startswith("objcolor_sel_text="):
objcolor_sel_text = line.replace('objcolor_sel_text=','').replace('\n','')
elif line.startswith("objcolor_sel_bg="):
objcolor_sel_bg = line.replace('objcolor_sel_bg=','').replace('\n','')
else:
if (line == '\n'):
break
interface = line.split(';')
objects[object_i]['Label'] = interface[0].replace('\n','')
objects[object_i]['Action'] = interface[1].replace('\n','')
object_i = object_i + 1
fp.close()
colorcode = convert_text_to_color(titlecolor)
colorcode_bg = convert_text_to_color(window_bg)
curses.init_pair(2, colorcode, colorcode_bg)
colorcode_text = convert_text_to_color(objcolor_text)
colorcode_bg = convert_text_to_color(objcolor_bg)
curses.init_pair(3, colorcode_text, colorcode_bg)
colorcode_text = convert_text_to_color(objcolor_sel_text)
colorcode_bg = convert_text_to_color(objcolor_sel_bg)
curses.init_pair(4, colorcode_text, colorcode_bg)
maxh = int(maxh)
screen.bkgd(' ', curses.color_pair(2))
_, x = screen.getmaxyx()
titlepad = curses.newpad(1, x-2)
titlepad.addstr(menutext, curses.color_pair(2))
titlepad.bkgd(' ', curses.color_pair(2) | curses.A_BOLD)
infopad = curses.newpad(3, 15)
infopad.addstr("Press q to exit", curses.color_pair(2))
def create_entry(text,startheight):
_, x = screen.getmaxyx()
pad = curses.newpad(maxh, x - 2)
cheight = int(maxh / 2)
tstart = int((x / 2) - (len(text) / 2))-1
pad.addstr(cheight,tstart,text)
pad.bkgd(' ', curses.color_pair(3))
return pad
def select_entry(pad):
global parseoffset
global select
global refreshlist
global selectedpad
global scrolldirection
global object_i
global maxfitobj
global resize
if (object_i > maxfitobj) or (parseoffset != 0):
selectpad.erase()
selectpad.resize(3,len(str(100) + "/") + len(str(object_i)))
selectpad.addstr(str(select + 1) + "/" + str(object_i), curses.color_pair(2))
selectpad.refresh(0, 0, 1, 2, 1, x-2)
if (pad):
if (selectedpad != None) and not (resize):
deselect_entry(selectedpad)
pad['pad'].bkgd(' ', curses.color_pair(4))
cheight = int(maxh / 2)
tstart = int((x / 2) - (len(pad['label']) / 2))-1
pad['pad'].addstr(cheight,tstart,pad['label'])
y, _ = pad['pad'].getbegyx()
sy, sx = screen.getmaxyx()
pad['pad'].refresh(0,0,y,1,sy,sx-2)
selectedpad = pad
else:
scrolldirection = "up"
parseoffset = parseoffset - 1
refreshlist = True
screen.refresh()
def deselect_entry(pad):
pad['pad'].bkgd(' ', curses.color_pair(3))
cheight = int(maxh / 2)
tstart = int((x / 2) - (len(pad['label']) / 2))-1
pad['pad'].addstr(cheight,tstart,pad['label'])
y, _ = pad['pad'].getbegyx()
sy, sx = screen.getmaxyx()
pad['pad'].refresh(0,0,y,1,sy,sx-2)
screen.refresh()
curseLoop = True
pads = False
action = False
select = 0
selectedpad = None
scroll = False
parseoffset = 0
refreshlist = False
scrolldirection = "down"
seltext = "Selecting 0/0"
selectpad = curses.newpad(3, len(seltext))
selectpad.bkgd(' ', curses.color_pair(3))
y, x = screen.getmaxyx()
screensize = y - 4
maxfitobj = int(screensize / maxh)
while curseLoop:
screen.refresh()
resize = curses.is_term_resized(y, x)
if resize is True:
y, x = screen.getmaxyx()
screen.clear()
curses.resizeterm(y, x)
screensize = y - 4
maxfitobj = int(screensize / maxh)
pads = False
screen.refresh()
else:
try:
titlepad.refresh(0, 0, 2, int((x/2)-(len(menutext)/2)), 2, x-2)
infopad.refresh(0, 0, 1, x-17, 1, x-2)
except:
pass
j = 4
if (pads == False) or (refreshlist):
pads = collections.defaultdict(dict)
if (object_i > maxfitobj):
parserange = range(0 + parseoffset, maxfitobj + parseoffset)
else:
parserange = range(object_i)
for i in parserange:
pads[i]['pad'] = create_entry(objects[i]['Label'],j)
try:
pads[i]['pad'].refresh(0,0,j,1,y,x-2)
except:
pass
pads[i]['action'] = objects[i]['Action']
pads[i]['label'] = objects[i]['Label']
pads[i]['range-start'] = j
pads[i]['range-end'] = j + maxh
j = j + maxh
if (refreshlist):
if (scrolldirection == "down"):
select = maxfitobj + parseoffset - 1
select_entry(pads[select])
if (scrolldirection == "up"):
select = parseoffset
select_entry(pads[select])
else:
select = 0
select_entry(pads[select])
refreshlist = False
event = screen.getch()
if event == ord("q"): break
if event == curses.KEY_MOUSE:
try:
_, _, my, _, _ = curses.getmouse()
if (object_i > maxfitobj):
parserange = range(0 + parseoffset, maxfitobj + parseoffset)
else:
parserange = range(object_i)
for i in parserange:
if (my >= pads[i]['range-start']) and (my < pads[i]['range-end']):
if (selectedpad != None):
deselect_entry(selectedpad)
select_entry(pads[i])
action = pads[i]['action']
y, _ = pads[i]['pad'].getbegyx()
sy, sx = screen.getmaxyx()
pads[i]['pad'].refresh(0,0,y,1,sy,sx-2)
sleep(0.2)
curseLoop = False
except:
pass
if event == curses.KEY_UP:
if (selectedpad == None):
select = 0
select_entry(pads[select])
if (select != 0):
select = select - 1
select_entry(pads[select])
if event == curses.KEY_DOWN:
if (selectedpad != None):
if (select != maxfitobj + parseoffset - 1):
if not (select == object_i - 1):
select = select + 1
deselect_entry(selectedpad)
select_entry(pads[select])
else:
if (select == maxfitobj + parseoffset - 1):
if (select != object_i - 1):
select = select + 1
parseoffset = parseoffset + 1
scrolldirection = "down"
refreshlist = True
else:
if (object_i == 1):
select = 0
select_entry(pads[select])
else:
select = 1
select_entry(pads[select])
if event == 10:
if (selectedpad != None):
action = objects[select]['Action']
curseLoop = False
curses.endwin()
sleep(0.1)
if (action):
if action.startswith("function:"):
function = action.split(":")[1]
RunInbuiltFunction(function)
elif (action == "quit"):
exit()
else:
os.system(action)
|
jacobbieker/DataMining_Jaccard | main.py | import numpy as np
import sys
import scipy.sparse as sparse
import itertools
def minhashing(csc_matrix, num_users, num_movies):
"""
Does the minhashing on a CSC sparse matrix, returning the dense signature matrix
:param csc_matrix: CSC matrix with movies as rows, and users and columns
:param num_users: Number of users
:param num_movies: Number of movies
:return: Signature matrix with rows as signatures and columns as users
"""
signature = 120
signature_matrix = np.zeros((signature, num_users), dtype='int16')
# now get the 120 permutations for the signatures
for permutation in range(signature):
row_order = np.random.permutation(np.arange(num_movies))
# While CSC is not ideal for changing rows, this is faster than converting to csr and converting back
permuted_csc_matrix = csc_matrix[row_order, :]
# Number of columns should be the number of users
for i in range(num_users):
# Gets all the ones in the column, chooses the min index from the list of all indices in a column
first = permuted_csc_matrix.indices[permuted_csc_matrix.indptr[i]:permuted_csc_matrix.indptr[i + 1]].min()
signature_matrix[permutation, i] = first
return signature_matrix, signature
def lsh(sig_mat, signature, num_bands, sparse_matrix):
"""
LSH takes the signature matrix and "hashes" them into buckets that are then used to find the similarity
:param sig_mat: Signature Matrix, dense matrix
:param signature: Length of the signature
:param num_bands: Number of bands to use
:param sparse_matrix: The sparse original matrix
:return: The unique sets of the data
"""
buckets = []
num_rows = int(np.floor(signature / num_bands))
# Make the sparse matrix dense for the jaccard similarity check
sparse_matrix = sparse_matrix.toarray()
# Go through each band
current_row = 0
unique_set = set()
total_ones_found = 0
for bands in range(num_bands):
# These are the one in the band
band = sig_mat[current_row:num_rows + current_row, :]
current_row += num_rows
# Create the buckets
indexes = np.ravel_multi_index(band, band.max(1) + 1)
s_indexes = indexes.argsort()
sorted_indexes = indexes[s_indexes]
bucket_array = np.array(np.split(s_indexes, np.nonzero(sorted_indexes[1:] > sorted_indexes[:-1])[0] + 1))
# Only get buckets with more than one user
for index in range(len(bucket_array)):
if len(bucket_array[index]) > 1:
buckets.append(bucket_array[index])
# Go through all the buckets, finding the actual similar pairs
for i in range(len(buckets)):
        # Build the set of all unique user pair combinations within this bucket
user_pairs = set(pair for pair in itertools.combinations(buckets[i], 2))
# Count how many buckets both pairs have in common vs total number of buckets to get the answer
for pair in user_pairs:
# Check if already in unique_set
if pair not in unique_set and (pair[1], pair[0]) not in unique_set:
                # The signature-based check is much faster but less exact; it can also
                # discard some truly similar pairs, so a lower threshold is used here.
sim = signature_similarity(pair[0], pair[1], sig_mat)
if sim > 0.4:
# Much more time consuming, but makes sure it is actually higher than 0.5
j_sim = bool_jaccards_similarity(pair[0], pair[1], sparse_matrix)
if j_sim > 0.5:
if pair[0] < pair[1]:
unique_set.add(pair)
else:
unique_set.add((pair[1], pair[0]))
# Now write out as it goes
if len(unique_set) > total_ones_found + 10:
# Write every 10 as a checkpoint
write_file(unique_set)
total_ones_found = len(unique_set)
# Also write it when its all done
write_file(unique_set)
return unique_set
def write_file(unique_set):
"""
Writes the unique set to the results file
:param unique_set: Python set of pairs with Jaccard sim > 0.5
:return:
"""
unique_set = sorted(unique_set)
with open("results.txt", "w") as f:
for set in unique_set:
f.write(str(set[0]) + "," + str(set[1]) + "\n")
def signature_similarity(user1, user2, signature_matrix):
"""
Calculates the similarity in the signature matrix, directly since its much smaller
:param user1: The first user
:param user2: The second user
:param signature_matrix: The dense signature matrix
:return: The similarity score based on the signature matrix for two users
"""
sim_score = float(np.count_nonzero(signature_matrix[:, user1] == signature_matrix[:, user2]))
sim_score /= signature_matrix.shape[0]
return sim_score
def bool_jaccards_similarity(user1, user2, dense_matrix):
"""
Calculates the Jaccard Similarity on a boolean matrix given two users
:param user1: First user
:param user2: Second User
:param dense_matrix: Movie x User Dense matrix to use
:return: The Jaccard similarity
"""
# Numerator, the intersection of both users
intersection = np.logical_and(dense_matrix[:, user1], dense_matrix[:, user2])
union = np.logical_or(dense_matrix[:, user1], dense_matrix[:, user2])
    jaccard_sim = intersection.sum() / float(union.sum())
    return jaccard_sim
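# Worked example: if two users' boolean movie columns are [1, 1, 0] and [1, 0, 1],
# the intersection contains 1 movie and the union contains 3, so the Jaccard
# similarity is 1/3 ~= 0.33.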
def convert_data(data):
"""
Converts the data storage in the .npy file to a sparse matrix
:param data:
    :return: Compressed Sparse Column (CSC) matrix for use in minhashing
"""
num_users = np.max(data[:, 0]) + 1
num_movies = np.max(data[:, 1]) + 1
matrix_values = np.ones(data.shape[0])
# Used boolean as that saves on memory and allows calculating the Jaccard similarity easier
    csc_matrix = sparse.csc_matrix((matrix_values, (data[:, 1], data[:, 0])), shape=(num_movies, num_users), dtype='b')
    return csc_matrix
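# Example invocation (matching the argument check below); the .npy file is assumed
# to hold integer [user, movie] index pairs:
#   python main.py 42 path/to/user_movie.npy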
if __name__ == "__main__":
# Get arguments
arguments = sys.argv
if len(arguments) < 3:
# Not long enough
print("Not enough arguments, input should be of form:\n python main.py seed path\\to\\user_movie.npy")
exit()
else:
# Set seed for random generator
np.random.seed(int(arguments[1]))
data = convert_data(np.load(arguments[2]))
sig_mat, signature = minhashing(data, data.shape[1], data.shape[0])
unique_set = lsh(sig_mat, signature, num_bands=20, sparse_matrix=data)
|
10-15-5/Crypto-Trader | trader-no-fstrings.py | <reponame>10-15-5/Crypto-Trader
import cbpro
import os
import logging
import time
import configparser
import sys
import smtplib
import math
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceOrderException
from binance.enums import *
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# ------------------------------------------------------------------
# Logging Setup
# ------------------------------------------------------------------
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
file_handler = logging.FileHandler("settings\\logs.log", encoding='utf8')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# ------------------------------------------------------------------
config = configparser.RawConfigParser()
configFilePath = r"settings/config.txt"
config.read(configFilePath, encoding="utf-8")
# Global variables
auth_client = cbpro.AuthenticatedClient(config.get("CONFIG", "CB_PRO_PUBLIC"),
config.get("CONFIG", "CB_PRO_PRIVATE"),
config.get("CONFIG", "CB_PRO_PASSPHRASE")
)
public_client = cbpro.PublicClient()
binance_client = Client(config.get("CONFIG", "BINANCE_PUBLIC_KEY"),
config.get("CONFIG", "BINANCE_PRIVATE_KEY")
)
def getcoins():
    print("Please enter the symbol of the coins you want to buy (if buying multiple, separate them with commas):")
coins = input().upper()
coins = coins.replace(" ", "")
coins = coins.split(",")
for i in range(len(coins)):
# Check to see if coin can be bought on CoinbasePro
pair = coins[i] + "-" + config.get("CONFIG", "COINBASE_CURRENCY")
response = public_client.get_product_order_book(pair)
if "message" in response:
try:
binance_client.get_order_book(symbol=coins[i] + config.get("CONFIG", "BINANCE_CURRENCY"))
coins[i] += "-BINANCE"
except:
print(pair + " is an invalid trading pair for CoinbasePro & Binance, "
"please re-run the program and try again")
sys.exit()
else:
coins[i] += "-COINBASE"
getpurchaseamount(coins)
def getpurchaseamount(coins):
amount = []
# currency = config.get("CONFIG", "CURRENCY")
# print("How much do you want to spend? (Minimum amount per transaction is 10" + currency + ")")
for i in range(len(coins)):
market = coins[i].split("-")[1]
if market == "COINBASE":
currency = config.get("CONFIG", "COINBASE_CURRENCY")
            print("How much do you want to spend? (Minimum amount per transaction is 10 " + currency + ")")
else:
currency = config.get("CONFIG", "BINANCE_CURRENCY")
            print("How much do you want to spend? (Minimum amount per transaction is 10 " + currency + ")")
next_coin = False
while not next_coin:
spend = input(coins[i] + ":\t" + currency)
try:
spend = float(spend)
                if spend < 10:
                    print("Has to be at least 10, Try again!")
                else:
                    amount.append(str(spend))
                    next_coin = True
except ValueError:
print("Please only enter digits, Try again!")
with open("settings/coins.txt", "w") as file:
for i in range(len(coins)):
            value_to_write = coins[i] + "-" + amount[i] + "\n"
file.write(value_to_write)
def buycrypto(specs):
    if specs["market"] == "COINBASE":
order = auth_client.buy(order_type="market",
                                product_id=specs["coin"] + "-" + config.get("CONFIG", "COINBASE_CURRENCY"),
funds=specs["amount"]) # Amount you want to buy
order_id = order["id"] # Uses the order ID to get specific details of transaction
time.sleep(10) # Wait 10 seconds for CB to catch up and log all the transactions
else:
try:
coin_price = float(binance_client.get_symbol_ticker(
symbol=specs["coin"] + config.get("CONFIG", "BINANCE_CURRENCY")
)["price"])
quantity_floor = round_decimals_down((float(specs["amount"]) / coin_price), 6)
# Binance API won't let you buy in fractions if the coin costs less than $1
if quantity_floor > 1:
quantity_floor = math.floor(quantity_floor)
order = binance_client.create_order(
symbol=specs["coin"] + config.get("CONFIG", "BINANCE_CURRENCY"),
side=SIDE_BUY,
type=ORDER_TYPE_MARKET,
quantity=quantity_floor,
)
order_id = order["orderId"]
except BinanceAPIException as e:
print(e.status_code)
print(e.message)
except BinanceOrderException as e:
print(e)
return order_id
def round_decimals_down(number:float, decimals:int=2):
"""
Returns a value rounded down to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals < 0:
raise ValueError("decimal places has to be 0 or more")
elif decimals == 0:
return math.floor(number)
factor = 10 ** decimals
return math.floor(number * factor) / factor
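# Examples: round_decimals_down(0.12345678, 6) == 0.123456 and
# round_decimals_down(1234.5678) == 1234.56 (default of 2 decimal places).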
def writetolog(dets, market):
if market == "COINBASE":
try:
            msg = (dets["product_id"] + " - Date & Time: " + dets["created_at"] + " - Gross Spent: " + dets["specified_funds"] +
                   " - Fees: " + dets["fill_fees"] + " - Net Spent: " + dets["funds"] +
                   " - Amount Bought: " + dets["filled_size"])
except:
msg = "Error getting order details from Coinbase" # Don't want to break the whole program so it prints this instead
else:
created_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(dets["time"]))
try:
            msg = (dets["symbol"] + " - Date & Time: " + created_at + " - Gross Spent: " + dets["price"] +
                   " - Amount Bought: " + dets["origQty"])
except:
msg = "Error getting order details from Binance" # Don't want to break the whole program so it prints this instead
logger.info(msg)
def sendemail(order_details, market):
smtp = smtplib.SMTP(config.get('CONFIG', 'SMTP_SERVER'), int(config.get('CONFIG', 'SMTP_PORT')))
smtp.ehlo()
smtp.starttls()
smtp.login(config.get('CONFIG', 'SMTP_SENDING_EMAIL'), config.get('CONFIG', 'SMTP_PASSWORD'))
if market == "COINBASE":
try:
            text = (order_details["product_id"] + " - You got " + order_details["filled_size"] + " for " +
                    config.get("CONFIG", "CURRENCY") + str(float(order_details["specified_funds"])))
except:
text = "You bought some crypto but for some reason the messaging part of it fucked up!"
else:
try:
            text = (order_details["symbol"] + " - You got " + order_details["executedQty"] + " for " +
                    config.get("CONFIG", "CURRENCY") + str(float(order_details["price"])))
except:
text = "You bought some crypto but for some reason the messaging part of it fucked up!"
subject = "DCA Weekly Notification"
msg = MIMEMultipart()
msg["Subject"] = subject
msg.attach(MIMEText(text))
smtp.sendmail(
from_addr=config.get('CONFIG', 'SMTP_SENDING_EMAIL'),
to_addrs=config.get('CONFIG', 'SMTP_RECEIVING_EMAIL'),
msg=msg.as_string()
)
smtp.quit()
def main():
if not os.path.isfile("settings/coins.txt"):
getcoins()
    order_ids = []
with open("settings/coins.txt", "r") as coins:
coin_and_amount = coins.read().splitlines()
for i in range(len(coin_and_amount)):
split = coin_and_amount[i].split("-")
specs = {"coin": split[0], "market": split[1], "amount": split[2]}
        order_ids.append({
            "market": specs["market"],
            "order id": buycrypto(specs),
            "coin pairing": split[0],
        })
for x in order_ids:
if x["market"] == "COINBASE":
# Uses the order ID to get specific details of transaction
dets = auth_client.get_order(x["order id"])
else:
            dets = binance_client.get_order(symbol=x["coin pairing"] + config.get("CONFIG", "BINANCE_CURRENCY"), orderId=x["order id"])
writetolog(dets, x["market"])
sendemail(dets, x["market"])
if __name__ == '__main__':
main()
|
jinjamator/jinjamator-plugin-content-ssh | jinjamator/plugins/content/ssh/__init__.py | # Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netmiko import ConnectHandler, ssh_exception
import textfsm
import os
from jinjamator.plugins.content.fsm import process
import logging
log = logging.getLogger()
from netmiko import log as netmiko_log
try:
from textfsm import clitable
except ImportError:
import clitable
def connect(**kwargs):
"""Run a command via SSH and return the text output.
:param command: The command that should be run.
:type command: ``str``
:raises Exception: If the command cannot be run on the remote device and best_effort is not set within the task.
:return: The text output of the command.
:rtype: ``str``
:Keyword Arguments:
* *ssh_username* (``str``), ``optional``, ``jinjamator enforced`` --
Username for the SSH connection. If not set in task configuration or via keyword argument jinjamator asks the user to input the data.
* *ssh_password* (``str``), ``optional``, ``jinjamator enforced`` --
Password for the SSH connection. If not set in task configuration or via keyword argument jinjamator asks the user to input the data.
* *ssh_host* (``str``), ``optional``, ``jinjamator enforced`` --
Target hostname or IP address for the SSH connection. If not set in task configuration or via keyword argument jinjamator asks the user to input the data.
* *ssh_port* (``int``), ``optional`` --
SSH TCP port, defaults to 22
* *ssh_device_type* (``str``), ``optional`` --
Netmiko device type, defaults to "cisco_nxos".
Currently supported device_types can be found here: https://github.com/ktbyers/netmiko/tree/develop/netmiko
* *fast_cli* (``bool``), ``optional`` --
Use Netmiko fast_cli mode, defaults to False
* *verbose* (``bool``), ``optional`` --
Set Netmiko to debug mode, defaults to False
:Examples:
    If one of the following conditions is met,
* *ssh_username*, *ssh_password*, *ssh_host* is specified via command line parameter in CLI Mode e.g -m 'ssh_username':'admin'
* Any of *ssh_username*, *ssh_password*, *ssh_host* is not specified via command line parameter in CLI Mode and the user enters the data correctly via CLI.
* The task is run via Daemon mode and ssh_username, ssh_password, ssh_host are defined in the task defaults.yaml, environment site defaults.yaml.
* The task is run via Daemon mode and ssh_username, ssh_password, ssh_host are entered correctly in the generated webform.
    the raw output of the command ``show inventory`` from a Cisco NX-OS box is returned by the tasklet.
jinja2 tasklet:
.. code-block:: jinja
{{ssh.run('show inventory')}}
python tasklet:
.. code-block:: python
return ssh.run('show inventory')
To set the arguments directly on call of the function. The example will ask for the password and connects to 1.2.3.4 port 22 and runs the command "show inventory"
jinja2 tasklet:
.. code-block:: jinja
{{ssh.run('show inventory',ssh_username=admin,ssh_host='1.2.3.4')}}
python tasklet:
.. code-block:: python
return ssh.run('show inventory',ssh_username='admin','ssh_host'='1.2.3.4')
"""
defaults = {
"port": 22,
"device_type": "cisco_nxos",
"fast_cli": False,
"verbose": False,
}
cfg = {}
opts = {}
for var_name in ["host", "username", "password", "port", "device_type", "verbose"]:
cfg[var_name] = kwargs.get(
f"ssh_{var_name}",
kwargs.get(
var_name,
_jinjamator.configuration.get(
f"ssh_{var_name}", defaults.get(var_name)
),
),
)
if cfg[var_name] == None:
cfg[var_name] = _jinjamator.handle_undefined_var(f"ssh_{var_name}")
try:
del kwargs[var_name]
except KeyError:
pass
for var_name in kwargs:
opts[var_name] = kwargs[var_name]
try:
if cfg["verbose"]:
netmiko_log.setLevel(logging.DEBUG)
else:
netmiko_log.setLevel(logging.ERROR)
connection = ConnectHandler(**cfg)
return connection
except ssh_exception.NetMikoAuthenticationException as e:
if _jinjamator.configuration["best_effort"]:
_jinjamator._log.error(
                f'Unable to connect to {cfg["host"]}:{cfg["port"]} as {cfg["username"]} on platform {cfg["device_type"]} - {str(e)}'
)
return ""
else:
raise Exception(
                f'Unable to connect to {cfg["host"]}:{cfg["port"]} as {cfg["username"]} on platform {cfg["device_type"]} - {str(e)}'
)
def query(command, connection=None, **kwargs):
device_type = (
kwargs.get("device_type")
or _jinjamator.configuration.get(f"ssh_device_type")
or _jinjamator.handle_undefined_var("ssh_device_type")
)
kwargs["device_type"] = device_type
config = run(command, connection, **kwargs)
return process(device_type, command, config)
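# Note: query() simply feeds the raw run() output through the fsm content plugin
# (TextFSM), so e.g. ssh.query("show inventory", ssh_host="1.2.3.4") would return
# structured records rather than raw text, provided a template exists for the
# configured device_type (the host address here is purely illustrative).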
def disconnect(connection):
connection.cleanup()
def run(command, connection=None, **kwargs):
auto_disconnect = False
if not connection:
connection = connect(**kwargs)
auto_disconnect = True
opts = {}
for var_name in ["host", "username", "password", "port", "device_type"]:
try:
del kwargs[var_name]
except KeyError:
pass
for var_name in kwargs:
opts[var_name] = kwargs[var_name]
retval = connection.send_command_expect(command, max_loops=10000, **opts)
if auto_disconnect:
disconnect(connection)
# netmiko_log.setLevel(backup_log_level)
return retval
|
yukitheclown/io_scene_yuk | __init__.py | <reponame>yukitheclown/io_scene_yuk
bl_info = {
"name": "Yuk2 format",
"author": "yukizini",
"version": (0, 0, 0),
"blender": (2, 74, 0),
"location": "File > Export, Scene properties",
"description": "Export yuk2",
"wiki_url": "http://github.com/yukizini",
"category": "Export",
}
if "bpy" in locals():
import imp
if "export_yuk2" in locals():
imp.reload(export_yuk2)
import bpy
from bpy.props import (BoolProperty)
from bpy_extras.io_utils import ( ExportHelper, path_reference_mode, axis_conversion )
class ExportYUK2(bpy.types.Operator, ExportHelper):
"""Save a yuk2 File"""
bl_idname = "export_scene.yuk2"
bl_label = 'Export yuk2'
bl_options = {'PRESET'}
filename_ext = ".yuk2"
    exportAnim = BoolProperty(name="Export selected object's active animation?", default = True)
    exportMesh = BoolProperty(name="Export selected mesh?", default = True)
def execute(self, context):
from . import export_yuk2
from mathutils import Matrix
keywords = self.as_keywords(ignore=("filename_ext", "check_existing" ))
keywords["globalMatrix"] = axis_conversion(to_forward='-Z', to_up='Y').to_4x4()
return export_yuk2.Export(self, context, **keywords)
def menu_func_export(self, context):
self.layout.operator(ExportYUK2.bl_idname, "Export yuk2 (.yuk2)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
|
yukitheclown/io_scene_yuk | export_yuk2.py | <reponame>yukitheclown/io_scene_yuk
import os
import struct
import shutil
import zlib
import bpy
import mathutils
import bpy_extras.io_utils
def GetMesh(obj, context, GLOBAL_MATRIX):
mesh = None
try:
mesh = obj.to_mesh(context.scene, False, 'PREVIEW', calc_tessface=False)
except RuntimeError:
return
if GLOBAL_MATRIX:
mesh.transform(GLOBAL_MATRIX * obj.matrix_world)
else:
mesh.transform(obj.matrix_world)
import bmesh
bm = bmesh.new()
bm.from_mesh(mesh)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(mesh)
bm.free()
return mesh
def WriteTexture(out, image):
data = bytearray(image.size[0] * image.size[1] * image.channels)
out += struct.pack("<iii", image.size[0], image.size[1], image.channels)
# pixels = list(image.pixels)
# for k in range(0, image.size[1] * image.size[0]):
# index = k*image.channels
# for m in range(0, image.channels):
# data[index+m] = int(pixels[index+m] * 0xFF) & 0xFF
# out += zlib.compress(data, 9)[2:-4]
out += bytes(map(int, [0xFF * i for i in image.pixels]))
# out += data
# out += zlib.compress(bytes(map(int, image.pixels)), 9)[2:-4]
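    # Resulting byte layout per texture (little-endian): int32 width, int32 height,
    # int32 channels, then width*height*channels bytes of 8-bit pixel data derived
    # from Blender's float pixel values.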
def WriteFile(out, context, bones, GLOBAL_MATRIX=None):
selected = context.selected_objects[0]
mesh = GetMesh(selected, context, GLOBAL_MATRIX)
    uvLayer = mesh.uv_layers.active.data[:]
out += struct.pack("<i", len(mesh.materials))
images = []
topImageIndex = 0
imagesMap = {}
for mat in mesh.materials:
texIndex = None
normTexIndex = None
for mtex in mat.texture_slots:
if mtex and mtex.texture and mtex.texture.type == 'IMAGE' and len(mtex.texture.image.pixels):
image = mtex.texture.image
val = imagesMap.get(image.filepath)
if val is None:
topImageIndex += 1
imagesMap[image.filepath] = topImageIndex
val = topImageIndex
images.append(image)
if mtex.use_map_normal and normTexIndex == None:
normTexIndex = val
elif texIndex == None:
texIndex = val
if normTexIndex and texIndex:
break
out += struct.pack("<i", mat.subsurface_scattering.use)
out += struct.pack("<i", texIndex or 0)
out += struct.pack("<i", normTexIndex or 0)
color = (mat.diffuse_intensity * mat.diffuse_color)
out += struct.pack("<2f", ((mat.specular_hardness-1) / 510) * 100, mat.ambient)
out += struct.pack("<4f", color[0], color[1], color[2], 1)
out += struct.pack("<4f", mat.specular_color[0], mat.specular_color[1], mat.specular_color[2], mat.specular_intensity)
out += struct.pack("<i", len(images))
for image in images:
WriteTexture(out, image)
faces = [(index, face) for index, face in enumerate(mesh.polygons)]
faces.sort(key=lambda face: face[1].material_index)
materialElements = [[] for i in range(0, len(mesh.materials))]
mesh.calc_normals_split()
verts = mesh.vertices
loops = mesh.loops
vertMap = {}
uniquePacked = []
topElementIndex = 0
for fIndex, face in faces:
fV = [(vI, vIndex, lIndex) for vI, (vIndex, lIndex) in enumerate(zip(face.vertices, face.loop_indices))]
v1 = mathutils.Vector(verts[fV[0][1]].co)
v2 = mathutils.Vector(verts[fV[1][1]].co)
v3 = mathutils.Vector(verts[fV[2][1]].co)
uv1 = mathutils.Vector(uvLayer[fV[0][2]].uv)
uv2 = mathutils.Vector(uvLayer[fV[1][2]].uv)
uv3 = mathutils.Vector(uvLayer[fV[2][2]].uv)
edge1 = v2 - v1
edge2 = v3 - v1
uvEdge1 = uv2 - uv1
uvEdge2 = uv3 - uv1
bitangent = mathutils.Vector()
tangent = mathutils.Vector()
mul = uvEdge1.x * uvEdge2.y - uvEdge2.x * uvEdge1.y
if mul != 0:
mul = 1.0 / mul
tangent = mul * ((edge1 * uvEdge2.y) - (edge2 * uvEdge1.y))
bitangent = mul * ((edge1 * -uvEdge2.x) + (edge2 * uvEdge1.x))
tangent.normalize()
bitangent.normalize()
for vI, v, li in fV:
normal = mathutils.Vector(loops[li].normal)
sign = 1
if normal.cross(tangent).dot(bitangent) > 0.0:
sign = -1
ctangent = tangent - tangent.dot(normal) * normal
ctangent.normalize()
packed = (v, (uvLayer[li].uv[0], uvLayer[li].uv[1]),
(normal.x, normal.y, normal.z),
(ctangent.x, ctangent.y, ctangent.z, sign))
val = vertMap.get(packed)
if val is None:
vertMap[packed] = val = topElementIndex
uniquePacked.append(packed)
topElementIndex += 1
if materialElements[face.material_index] is None:
materialElements[face.material_index] = []
materialElements[face.material_index].append(val)
out += struct.pack("<i", len(uniquePacked))
data = bytearray()
for packed in uniquePacked:
data += struct.pack("<3f", verts[packed[0]].co[0], verts[packed[0]].co[1], verts[packed[0]].co[2])
data += struct.pack("<2f", packed[1][0], packed[1][1])
data += struct.pack("<3f", packed[2][0], packed[2][1], packed[2][2])
data += struct.pack("<4f", packed[3][0], packed[3][1], packed[3][2], packed[3][3])
if bones:
weights = []
if len(verts[packed[0]].groups) > 0:
for group in verts[packed[0]].groups:
if group.weight > 0:
index = selected.vertex_groups[group.group].index
weights.append([group.weight, index])
if len(weights) < 4:
for j in range(len(weights), 4):
weights.append([0,-1])
weights.sort(key=lambda tup: tup[0], reverse=True)
data += struct.pack("<4f", weights[0][0], weights[1][0], weights[2][0], weights[3][0])
data += struct.pack("<4f", weights[0][1], weights[1][1], weights[2][1], weights[3][1])
# out += zlib.compress(data, 9)[2:-4]
out += data
for i in range(0, len(materialElements)):
if materialElements[i]:
out += struct.pack("<i", len(materialElements[i]))
else:
out += struct.pack("<i", 0)
for i in range(0, len(materialElements)):
if materialElements[i]:
for element in materialElements[i]:
out += struct.pack("<i", element)
return mesh
def WriteAnimation(out, action, armatureObj, bones, GLOBAL_MATRIX=None):
endFrame = action.frame_range[1]
boneKeyframes = [None] * len(armatureObj.pose.bones)
for group in action.groups:
index = bones.get(group.name)
if index is None:
continue
if boneKeyframes[index] is None:
boneKeyframes[index] = []
setFrames = [False] * int(endFrame+1)
for channel in group.channels:
for keyframe in channel.keyframe_points:
if int(keyframe.co[0]) <= endFrame:
setFrames[int(keyframe.co[0])] = True
bone = armatureObj.pose.bones[index]
for i in range(0, len(setFrames)):
if setFrames[i] is False:
continue
rot = list(bone.rotation_quaternion)
pos = list(bone.location)
for channel in group.channels:
data = armatureObj.path_resolve(channel.data_path)
if data == bone.rotation_quaternion:
rot[channel.array_index] = channel.evaluate(i)
elif data == bone.location:
pos[channel.array_index] = channel.evaluate(i)
boneKeyframes[index].append((i, pos[0], pos[1], pos[2], rot[1], rot[2], rot[3], rot[0]))
out += struct.pack("<i", len(boneKeyframes))
for j in range(0, len(boneKeyframes)):
if boneKeyframes[j] is None:
out += struct.pack("<i", 0)
continue
out += struct.pack("<i", len(boneKeyframes[j]))
for k in boneKeyframes[j]:
out += struct.pack("<ifffffff", k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7])
def WriteSkeleton(out, context, selected, bones, mesh, GLOBAL_MATRIX=None):
armatureObj = selected.find_armature()
armatureMatrix = GLOBAL_MATRIX * armatureObj.matrix_world
invBindMatrices = [mathutils.Matrix() for i in range(0, len(selected.vertex_groups))]
relativeMatrices = [mathutils.Matrix() for i in range(0, len(selected.vertex_groups))]
cubes = [[float("inf"), float("inf"), float("inf"),
-float("inf"), -float("inf"), -float("inf")] for i in range(0, len(selected.vertex_groups))]
for poseBone in armatureObj.pose.bones:
armatureBone = poseBone.bone
index = bones.get(armatureBone.name)
if armatureBone.parent is None:
relativeMatrices[index] = armatureMatrix * armatureBone.matrix_local
else:
parentMatrix = armatureBone.parent.matrix_local
relativeMatrices[index] = armatureBone.matrix_local
relativeMatrices[index] = parentMatrix.inverted() * relativeMatrices[index]
invBindMatrices[index] = (armatureMatrix * armatureBone.matrix_local).inverted()
for vert in mesh.vertices:
if len(vert.groups) == 0:
continue
invMatrix = None
mostInfluence = None
for group in vert.groups:
if group.weight > 0:
index = selected.vertex_groups[group.group].index
if invMatrix is None:
invMatrix = invBindMatrices[index] * group.weight
else:
invMatrix += invBindMatrices[index] * group.weight
if mostInfluence is None or mostInfluence[0] < group.weight:
mostInfluence = (group.weight, index)
if invMatrix is None:
continue
vertex = invMatrix * mathutils.Vector(vert.co).to_4d()
vertex /= vertex.w
cube = cubes[mostInfluence[1]]
if vertex.x < cube[0]:
cube[0] = vertex.x
if vertex.y < cube[1]:
cube[1] = vertex.y
if vertex.z < cube[2]:
cube[2] = vertex.z
if vertex.x > cube[3]:
cube[3] = vertex.x
if vertex.y > cube[4]:
cube[4] = vertex.y
if vertex.z > cube[5]:
cube[5] = vertex.z
out += struct.pack("<i", len(armatureObj.pose.bones))
for poseBone in armatureObj.pose.bones:
armatureBone = poseBone.bone
index = bones.get(armatureBone.name)
parentIndex = -1
if armatureBone.parent:
parentIndex = bones.get(armatureBone.parent.name)
pos, rot, scale = relativeMatrices[index].decompose()
cube = cubes[bones.get(armatureBone.name)]
cube[3] -= cube[0]
cube[4] -= cube[1]
cube[5] -= cube[2]
out += struct.pack("<iifffffff", parentIndex, index, pos.x, pos.y, pos.z, rot.x, rot.y, rot.z, rot.w)
out += struct.pack("<ffffff", cube[0], cube[1], cube[2], cube[3], cube[4], cube[5])
def Export(operator, context, filepath="", globalMatrix=None, exportAnim=False, exportMesh=False):
baseName, ext = os.path.splitext(filepath)
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
selected = context.selected_objects[0]
armatureObj = selected.find_armature()
bones = None
if armatureObj:
bones = {}
poseBones = armatureObj.pose.bones
for index in range(0, len(poseBones)):
bones[poseBones[index].bone.name] = index
print(poseBones[index].bone.name, index)
if exportMesh:
out = bytearray()
mesh = WriteFile(out, context, bones, globalMatrix)
if armatureObj:
WriteSkeleton(out, context, selected, bones, mesh, globalMatrix)
fp = open(baseName + ".yuk2", "wb")
fp.write(out)
fp.close()
if exportAnim and armatureObj:
out = bytearray()
action = armatureObj.animation_data.action
fp = open(baseName + "_" + action.name + ".anm", "wb")
WriteAnimation(out, action, armatureObj, bones, globalMatrix)
fp.write(out)
fp.close()
return {'FINISHED'}
|
AlvarDev/Cloud-SQL-Connection | main.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/cloud-sql/mysql/sqlalchemy/main.py
"""
from flask import Flask, jsonify
import sqlalchemy
import google.auth
from google.cloud import secretmanager
_, PROJECT_ID = google.auth.default()
app = Flask(__name__)
def init_connection_engine():
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret version.
db_user_secret = f"projects/{PROJECT_ID}/secrets/db_user_secret/versions/latest"
db_password_secret = f"projects/{PROJECT_ID}/secrets/db_password_secret/versions/latest"
db_name_secret = f"projects/{PROJECT_ID}/secrets/db_name_secret/versions/latest"
cloud_sql_conn_name_secret = f"projects/{PROJECT_ID}/secrets/cloud_sql_connection_name_secret/versions/latest"
# Access the secret version.
db_user_response = client.access_secret_version(request={"name": db_user_secret})
db_password_response = client.access_secret_version(request={"name": db_password_secret})
db_name_response = client.access_secret_version(request={"name": db_name_secret})
cloud_sql_conn_name_response = client.access_secret_version(request={"name": cloud_sql_conn_name_secret})
db_user = db_user_response.payload.data.decode("UTF-8")
db_pass = db_password_response.payload.data.decode("UTF-8")
db_name = db_name_response.payload.data.decode("UTF-8")
db_socket_dir = '/cloudsql'
cloud_sql_connection_name = cloud_sql_conn_name_response.payload.data.decode("UTF-8")
db_config = {
"pool_size": 5,
"max_overflow": 2,
"pool_timeout": 30, # 30 seconds
"pool_recycle": 1800, # 30 minutes
}
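# The URL object below is equivalent to a DSN of roughly this shape (names illustrative):
# mysql+pymysql://<db_user>:<db_pass>@/<db_name>?unix_socket=/cloudsql/<PROJECT>:<REGION>:<INSTANCE>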
pool = sqlalchemy.create_engine(
sqlalchemy.engine.url.URL(
drivername="mysql+pymysql",
username=db_user,
password=<PASSWORD>,
database=db_name,
query={
"unix_socket": "{}/{}".format(
db_socket_dir,
cloud_sql_connection_name)
}
),
**db_config
)
return pool
@app.before_first_request
def create_connection():
global db
db = init_connection_engine()
@app.route("/", methods=['GET'])
def get_pets():
"""
This method returns a list of pets from the database on Cloud SQL
"""
pets = []
with db.connect() as conn:
pets_result = conn.execute("SELECT id, name from pets;").fetchall()
for row in pets_result:
pets.append({"id": row[0], "name": row[1]})
# Response
return jsonify(pets), 200
|
gguilherme42/conceitos_de_LP | cap_2/exercicios_de_programacao/e003/multiplica_matriz_com_laco_logico.py | <gh_stars>0
class Matrix():
def __init__(self, matrix: list[list[int]]) -> None:
self.columns = len(matrix[0]) if matrix else 0
self.lines = len(matrix)
self.elements = matrix
matrix_A = Matrix([[2, 3, 1],
[-1, 0, 2]])
matrix_B = Matrix([ [1,-2],
[0,5],
[4, 1],])
def multiply_matrix(a: Matrix, b: Matrix) -> Matrix:
result =[]
if a.columns == b.lines:
i = 0
while i < a.lines:
new_elements = []
p = 0
while p < b.columns:
product = 0
j = 0
while j < a.columns:
product += a.elements[i][j] * b.elements[j][p]
j += 1
new_elements.append(product)
p += 1
i += 1
result.append(new_elements)
return Matrix(result)
matrix_to_assert = multiply_matrix(matrix_A, matrix_B)
assert matrix_to_assert.elements == Matrix([[6,12], [7,4]]).elements
print(matrix_to_assert.elements)
assert f"{matrix_to_assert.columns}x{matrix_to_assert.lines}" == "2x2" |
gguilherme42/conceitos_de_LP | cap_2/exercicios_de_programacao/e002/quicksort.py | from random import randint
import quicksort_recursivo
def partition(input_list: list, start: int, end: int) -> int:
pivot_element = input_list[end]
i = start
for j in range(start, end):
if input_list[j] <= pivot_element:
input_list[j], input_list[i] = input_list[i], input_list[j]
i += 1
input_list[i], input_list[end] = input_list[end], input_list[i]
return i
def quicksort(input_list: list, start: int=0, end: int=None):
test_list = input_list[:]
if end is None:
end = len(test_list) - 1
pivot_left = partition(test_list, start, end)
pivot_right = partition(test_list, start, end)
while start < pivot_left:
pivot_left = partition(test_list, start, pivot_left - 1)
while pivot_right < end:
pivot_right = partition(test_list, pivot_right + 1, end)
return test_list
random_list = [randint(0,100), randint(0,100), randint(0,100), randint(0,100), randint(0,100)]
test = random_list[:]
print(f"Lista desordenada -> {random_list}")
print(f"Lista ordenada quicksort NÃO RECURSIVO: {quicksort(random_list)}")
quicksort_recursivo.quicksort(test)
print(f"Lista ordenada quicksort RECURSIVO: {test}")
assert test == quicksort(random_list)
|
gguilherme42/conceitos_de_LP | cap_2/exercicios_de_programacao/e001/exercicio_com_registros.py |
name = input("Student name: ").strip().lower()
age = int(input(f"{name}'s age: "))
average = float(input(f"{name}'s average: "))
level = input(f"{name}'s level: ")
student_data = [name, age, average, level]
print(student_data)
|
gguilherme42/conceitos_de_LP | cap_2/exercicios_de_programacao/e002/quicksort_recursivo.py | <reponame>gguilherme42/conceitos_de_LP
from random import randint
def partition(input_list: list, start: int, end: int) -> int:
pivot_element = input_list[end]
i = start
for j in range(start, end):
if input_list[j] <= pivot_element:
input_list[j], input_list[i] = input_list[i], input_list[j]
i += 1
input_list[i], input_list[end] = input_list[end], input_list[i]
return i
def quicksort(input_list: list, start: int=0, end: int=None):
if end is None:
end = len(input_list) - 1
if start < end:
pivot = partition(input_list, start, end)
quicksort(input_list, start, pivot - 1)
quicksort(input_list, pivot + 1, end)
#random_list = [randint(0,100), randint(0,100), randint(0,100), randint(0,100), randint(0,100)]
#
#print(random_list)
#quicksort(random_list)
#print(random_list)
|
ShawnTanzc/CG4002-Capstone-B15 | external_comms/ultra96/publish.py | # publish.py
import pika, os
import ssl
import json
# Access the CLOUDAMQP_URL environment variable and parse it (fallback to localhost)
url = 'INSERT URL'
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel() # start a channel
#channel.exchange_declare('test_exchange')
channel.queue_declare(queue='hello') # Declare a queue
# channel.queue_bind('test_queue', 'test_exchange', 'tests')
payload= [{
"player_type": 1,
"player_hp": 100,
"player_shield_hp": 0,
"is_shielded": False,
"player_shield_count": 3,
"player_grenade": 3,
"player_ammo": 6,
"player_kill_count": 0
},
{
"player_type": 2,
"player_hp": 100,
"player_shield_hp": 0,
"is_shielded": False,
"player_shield_count": 3,
"player_grenade": 3,
"player_ammo": 6,
"player_kill_count": 0
}]
channel.basic_publish(exchange='',
routing_key='hello',
body=json.dumps(payload))
print(" [x] Sent: {}".format(json.dumps((payload))))
channel.close()
connection.close() |
ShawnTanzc/CG4002-Capstone-B15 | external_comms/Others/eval_server/eval_server.py | # Changing the actions in self.actions should automatically change the script to function with the new number of moves.
# Developed and improved by past CG4002 TAs and students: <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
import os
import sys
import time
import traceback
import random
import socket
import threading
import base64
import tkinter as tk
from tkinter import ttk
from tkinter.constants import HORIZONTAL, VERTICAL
import pandas as pd
from Crypto.Cipher import AES
LOG_DIR = os.path.join(os.path.dirname(__file__), 'evaluation_logs')
MESSAGE_SIZE = 2
ACTIONS = ["shoot", "shield", "grenade", "reload"]
NUM_ACTION_REPEATS = 4
"""
Class that will generate randomized list of actions.
Actions will be displayed on the evaluation server UI for the
players to follow.
"""
class TurnGenerator():
def __init__(self):
self.cur_turn = 0
self.num_actions = len(ACTIONS)
# Generate random sequence of actions for Player 1
self.p1_actions = ACTIONS * NUM_ACTION_REPEATS
random.shuffle(self.p1_actions)
self.p1_actions.insert(0, "none")
self.p1_actions.append("logout")
print(self.p1_actions)
# Generate random sequence of actions for Player 2
self.p2_actions = ACTIONS * NUM_ACTION_REPEATS
random.shuffle(self.p2_actions)
self.p2_actions.insert(0, "none")
self.p2_actions.append("logout")
print(self.p2_actions)
"""
Called at the start of every turn to generate new values for player actions
"""
def iterate(self):
# Return True if we have finished going through all turns
if self.cur_turn + 1 >= len(self.p1_actions):
return True
self.cur_turn += 1
print(f"New P1 Action: {self.p1_actions[self.cur_turn]}")
print(f"New P2 Action: {self.p2_actions[self.cur_turn]}")
return False
"""
Return both player expected actions in tuple of tuples: (p1_action,p2_action)
"""
def get_correct_action(self):
return self.p1_actions[self.cur_turn], self.p2_actions[self.cur_turn]
class Server(threading.Thread):
def __init__(self, ip_addr, port_num, group_id):
super(Server, self).__init__()
# Setup logger
self.log_filename = 'group{}_logs.csv'.format(group_id)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
self.log_filepath = os.path.join(LOG_DIR, self.log_filename)
self.columns = [
'timestamp',
'p1_action', 'gt_p1_action', 'p2_action', 'gt_p2_action',
'response_time',
'is_p1_action_correct', 'is_p2_action_correct'
]
self.df = pd.DataFrame(columns=self.columns)
self.df = self.df.set_index('timestamp')
# Setup turns
self.turn_gen = TurnGenerator() # Initialize turn generator
self.action_set_time = 0 # Time turn instructions/actions were set by eval_server
self.turn_wait_timeout = 60 # Turn response timeout amount
self.turn_wait_timer = threading.Timer(self.turn_wait_timeout, self.setup_turn) # Timer object to keep track of turn response timeout
# Temporary storage for correct actions for each player
self.last_p1_action = None
self.last_p2_action = None
# Ultra96 Connection things
self.connection = None # Ultra96 connection object
self.has_no_response = False # Flag set when there was no response from Ultra96
self.logout = False # Flag for whether Ultra96 has triggered logout
# Create a TCP/IP socket and bind to port
self.shutdown = threading.Event()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (ip_addr, port_num)
print('starting up on %s port %s' % server_address)
self.socket.bind(server_address)
"""
Main loop of server. This function performs a blocking wait for data from the Ultra96 until the Ultra96
disconnected. The data it receives is decrypted and is written to the logger. Lastly, the function sends
the correct turn data (action) to the Ultra96 and sets up the next turn.
"""
def run(self):
# Listen for incoming connections
self.socket.listen(1)
self.client_address, self.secret_key = self.setup_connection() # Wait for secret key from Ultra96
while not self.shutdown.is_set(): # Stop waiting for data if we received a shutdown signal from Ultra96
data = self.connection.recv(1024) # Blocking wait for data from the Ultra96
if data:
try:
msg = data.decode("utf8") # Decode raw bytes to UTF-8
decrypted_message = self.decrypt_message(msg) # Decrypt message using secret key
# If the action we received from the Ultra96 was a logout
if decrypted_message['p1_action'] == "logout":
self.logout = True
self.stop()
# If no valid action was sent
elif len(decrypted_message['p1_action']) == 0:
pass
# Received normal response from Ultra96. We log this response.
else:
self.has_no_response = False
# Store last action data for sending back to Ultra96 since self.setup_turn() overwrites data
correct_p1_action = self.last_p1_action
correct_p2_action = self.last_p2_action
self.write_turn_to_logger(
decrypted_message['p1_action'],
correct_p1_action,
decrypted_message['p2_action'],
correct_p2_action
)
print(f"Received: P1: {decrypted_message['p1_action']}, " + \
f"P2: {decrypted_message['p2_action']}")
print(f"Expected: P1: {correct_p1_action}, " + \
f"P2: {correct_p2_action}")
self.setup_turn() # Setup new turn to get new grid data
self.send_update(correct_p1_action, correct_p2_action) # Send last action and new grid data to Ultra96
except Exception as e:
traceback.print_exc()
else:
print('no more data from', self.client_address)
self.stop()
"""
This function waits for a connection from an Ultra96 and then reads the encryption key for the Ultra96's
messages from STDIN. Returns the encryption key and the Ultra96's port and IP address.
"""
def setup_connection(self):
# print("No actions for 60 seconds to give time to connect")
# self.timer = threading.Timer(self.timeout, self.set_next_action)
# self.timer.start()
# Wait for a connection
print('Waiting for a connection')
self.connection, client_address = self.socket.accept()
print("Enter the secret key: ")
secret_key = sys.stdin.readline().strip()
print('connection from', client_address)
if len(secret_key) == 16 or len(secret_key) == 24 or len(secret_key) == 32:
p1_action, p2_action = self.turn_gen.get_correct_action()
self.send_update(p1_action, p2_action) # Send starting data to Ultra96
self.setup_turn()
else:
print("AES key must be either 16, 24, or 32 bytes long")
self.stop()
return client_address, secret_key
"""
This function executes every turn to set the actions for the 2 players.
It first cancels the turn wait timer since the previous turn has ended and checks if we
received a response from the Ultra96 in the previous turn.
Next, it uses the TurnGenerator object to generate new actions and restarts
the turn wait timer.
"""
def setup_turn(self):
self.turn_wait_timer.cancel()
if self.has_no_response: # If no response was sent by Ultra96
self.write_turn_to_logger("None", "None", "None", "None", "None", "None", "None", "None")
print("TURN TIMEOUT")
p1_action, p2_action = self.turn_gen.get_correct_action()
self.send_update(p1_action, p2_action) # Send turn state update even at timeout
# Generate new actions using the TurnGenerator object
# These generated values are read directly by Tkinter.
finished = self.turn_gen.iterate()
# Check if no more turns left
if finished:
self.logout = True
self.stop()
# Get correct expected actions for checking once received Ultra96 response
self.last_p1_action, self.last_p2_action = self.turn_gen.get_correct_action()
# Used to calculate time taken to get response from Ultra96
self.action_set_time = time.time()
# Restart turn wait timer
self.turn_wait_timer = threading.Timer(self.turn_wait_timeout, self.setup_turn)
self.has_no_response = True
self.turn_wait_timer.start()
"""
This function decrypts the response message received from the Ultra96 using the secret encryption key
that was entered in this script during initial connection by the Ultra96.
It returns a dictionary containing the action detected by the Ultra96.
"""
def decrypt_message(self, cipher_text):
decoded_message = base64.b64decode(cipher_text) # Decode message from base64 to bytes
iv = decoded_message[:16] # Get IV value
secret_key = bytes(str(self.secret_key), encoding="utf8") # Convert secret key to bytes
cipher = AES.new(secret_key, AES.MODE_CBC, iv) # Create new AES cipher object
decrypted_message = cipher.decrypt(decoded_message[16:]).strip() # Perform decryption
decrypted_message = decrypted_message.decode('utf8') # Decode bytes into utf-8
decrypted_message = decrypted_message[decrypted_message.find('#'):] # Trim to start of response string
decrypted_message = bytes(decrypted_message[1:], 'utf8').decode('utf8') # Trim starting # character
messages = decrypted_message.split('|') # Split parts of message by | token
return {
'p1_action': messages[0] , 'p2_action': messages[1]
}
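# For reference only, a minimal sketch (an assumption, not code from this repo) of the
# matching client-side encryption the Ultra96 would need so that decrypt_message() above
# can parse its payload: pad the '#'-prefixed, '|'-separated string to a 16-byte multiple,
# AES-CBC encrypt it with a random IV, then base64-encode IV + ciphertext.
# ('get_random_bytes' comes from Crypto.Random; base64 and AES are already imported above.)
def _example_encrypt_message(secret_key, p1_action, p2_action):
    from Crypto.Random import get_random_bytes
    plaintext = "#" + p1_action + "|" + p2_action
    padded = plaintext + " " * (16 - len(plaintext) % 16)  # space padding; stripped on decrypt
    iv = get_random_bytes(16)
    cipher = AES.new(bytes(secret_key, encoding="utf8"), AES.MODE_CBC, iv)
    return base64.b64encode(iv + cipher.encrypt(padded.encode("utf8")))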
"""
Send last expected action to Ultra96.
"""
def send_update(self, correct_p1_action: str, correct_p2_action: str):
# Pack send data into a single string
send_str = f"{correct_p1_action}|{correct_p2_action}"
# Send without any encryption (unlike receiving)
self.connection.sendall(send_str.encode())
"""
Closes server upon end of assessment, after Ultra96 disconnects/sends logout message.
"""
def stop(self):
self.connection.close()
self.shutdown.set()
self.turn_wait_timer.cancel()
print("bye bye")
sys.exit()
"""
Write data to logger.
"""
def write_turn_to_logger(self,
pred_p1_action: str, correct_p1_action: str,
pred_p2_action: str, correct_p2_action: str):
log_filepath = self.log_filepath
if not os.path.exists(log_filepath): # first write
with open(log_filepath, 'w') as f:
self.df.to_csv(f)
with open(log_filepath, 'a') as f:
data = dict()
data['timestamp'] = time.time()
data['p1_action'] = pred_p1_action
data['gt_p1_action'] = correct_p1_action
data['p2_action'] = pred_p2_action
data['gt_p2_action'] = correct_p2_action
data['response_time'] = data['timestamp'] - self.action_set_time
data['is_p1_action_correct'] = (pred_p1_action == correct_p1_action)
data['is_p2_action_correct'] = (pred_p2_action == correct_p2_action)
self.df = pd.DataFrame(data, index=[0])[self.columns].set_index('timestamp')
self.df.to_csv(f, header=False)
def main():
if len(sys.argv) != 4:
print('Invalid number of arguments')
print('python3 eval_server.py [IP address] [Port] [groupID]')
sys.exit()
ip_addr = sys.argv[1]
port_num = int(sys.argv[2])
group_id = sys.argv[3]
server = Server(ip_addr, port_num, group_id)
server.start()
# Initialize base window
display_window = tk.Tk()
display_window.title("Evaluation Server")
main_frame = tk.Frame(
display_window
)
main_frame.pack(expand=True, fill='both')
# Finished turns
turn_text = tk.StringVar()
turn_label = tk.Label(main_frame, textvariable=turn_text, font=("times", 80))
turn_label.pack(fill='x')
# Player 1 column
p1_frame = tk.Frame(main_frame, bg="red")
p1_frame.pack(expand=True, fill='both', side='left')
# Player 1 title
p1_label = tk.Label(p1_frame, text="Player 1", font=('times', 100, 'bold'), bg="red")
p1_label.pack(ipady=20)
# Player 1 variable frame
p1_var_frame = tk.Frame(p1_frame, bg='#ffcccb')
p1_var_frame.pack(expand=True, fill='both')
# Player 1 action variable
p1_action_text = tk.StringVar()
p1_action_label = tk.Label(p1_var_frame, textvariable=p1_action_text, font=("times", 100), bg='#ffcccb')
p1_action_label.pack(expand=True)
# Player 1 variable separator
p1_var_sep = ttk.Separator(p1_var_frame, orient=HORIZONTAL)
p1_var_sep.pack(expand=True, fill='x')
# Player 2 column
p2_frame = tk.Frame(main_frame, bg="blue")
p2_frame.pack(expand=True, fill='both', side='right')
# Player 2 title
p2_label = tk.Label(p2_frame, text="Player 2", font=('times', 100, 'bold'), bg="blue")
p2_label.pack(ipady=20)
# Player 2 variable frame
p2_var_frame = tk.Frame(p2_frame, bg='#add8e6')
p2_var_frame.pack(expand=True, fill='both')
# Player 2 action variable
p2_action_text = tk.StringVar()
p2_action_label = tk.Label(p2_var_frame, textvariable=p2_action_text, font=("times", 100), bg='#add8e6')
p2_action_label.pack(expand=True)
# Player 2 variable separator
p2_var_sep = ttk.Separator(p2_var_frame, orient=HORIZONTAL)
p2_var_sep.pack(expand=True, fill='x')
# Player column separator
col_sep = ttk.Separator(main_frame, orient=VERTICAL)
col_sep.pack(expand=True, fill='y')
# Update window based on data in server
display_window.update()
while not server.shutdown.is_set():
turn_text.set(f"{server.turn_gen.cur_turn} / {len(server.turn_gen.p1_actions)-1}")
p1_action_text.set(server.turn_gen.get_correct_action()[0])
p2_action_text.set(server.turn_gen.get_correct_action()[1])
display_window.update()
time.sleep(0.2)
if __name__ == '__main__':
main()
|
ShawnTanzc/CG4002-Capstone-B15 | external_comms/ultra96/server_L2.py | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Waiting for connection....")
s.bind(('localhost', 15430))
s.listen()
connection, address = s.accept()
print("Connected to client")
with connection:
print(f"Connected by {address}")
while True:
buf = connection.recv(1024)
if not buf:
break
connection.sendall(buf)
print(buf)
|
ShawnTanzc/CG4002-Capstone-B15 | external_comms/ultra96/consume.py | <filename>external_comms/ultra96/consume.py
# consume.py
import pika, os
# import requests module
import requests
import json
# Access the CLOUDAMQP_URL environment variable and parse it (fallback to localhost)
url = 'INSERT URL'
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel() # start a channel
channel.queue_declare(queue='hello') # Declare a queue
def callback(ch, method, properties, body):
payload = json.loads(body)
print(" [x] Received: {} ".format(payload))
channel.basic_consume(queue='hello',
on_message_callback=callback,
auto_ack=True)
print(' [*] Waiting for messages:')
channel.start_consuming()
connection.close() |
ShawnTanzc/CG4002-Capstone-B15 | external_comms/laptop_1/client_L1.py | import paramiko
from paramiko import SSHClient, AutoAddPolicy
def sshCommand(hostname, port, username, password, command):
sshClient = paramiko.SSHClient()
sshClient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
sshClient.load_system_host_keys()
sshClient.connect(hostname, port, username, password)
stdin, stdout, stderr = sshClient.exec_command(command)
print(stdout.read())
# if __name__ == '__main__':
# sshCommand('sunfire.comp.nus.edu.sg', 22, 'shawntan', 'stzc@S9817869D', 'ls')
# import socket
#
# clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# clientsocket.connect(('localhost', 15432))
# clientsocket.send('hello')
# import paramiko
# import sshtunnel
#
# from sshtunnel import SSHTunnelForwarder
#
# server = SSHTunnelForwarder(
# 'sunfire.comp.nus.edu.sg',
# ssh_username="USERNAME",
# ssh_password="PASSWORD",
# remote_bind_address=('127.0.0.1', 8080)
# )
#
# server.start()
#
# print(server.local_bind_port) # show assigned local port
# # work with `SECRET SERVICE` through `server.local_bind_port`.
# server.stop()
import paramiko
import sshtunnel
from sshtunnel import open_tunnel
from time import sleep
import socket
def open_ssh_tunneling_to_ultra96():
ssh_tunnel = open_tunnel(
('sunfire.comp.nus.edu.sg', 22),
ssh_username="shawntan",
ssh_password="<PASSWORD>",
remote_bind_address=('192.168.127.12', 22),
block_on_close=False)
ssh_tunnel.start()
# print(ssh_tunnel.local_bind_port)
print("Connection to ssh tunnel: OK...")
ultra96_tunnel = open_tunnel(
#('127.0.0.1',ssh_tunnel.local_bind_port),
ssh_address_or_host=('localhost', ssh_tunnel.local_bind_port),
remote_bind_address=('127.0.0.1', 15435),
ssh_username='xilinx',
ssh_password='<PASSWORD>',
local_bind_address=('127.0.0.1', 15435),
block_on_close=False
)
ultra96_tunnel.start()
print(ultra96_tunnel.local_bind_port)
print("Connection to ultra 96: OK...")
# sshCommand('localhost', ultra96_tunnel.local_bind_port, 'xilinx', 'apricot816', 'ls')
connect_socket()
def connect_socket():
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as clientsocket:
clientsocket.connect(('localhost', 15435))
print('Connection to server: OK...')
clientsocket.send(b'Hello Nice to Meet you')
data = clientsocket.recv(1024)
print(f'Received {data}')
except ConnectionRefusedError:
print("Unable to connect")
if __name__ == '__main__':
open_ssh_tunneling_to_ultra96()
# connect_socket() |
yjg30737/pyqt-capturer | setup.py | <gh_stars>0
from setuptools import setup, find_packages
setup(
name='pyqt-capturer',
version='0.2.0',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
package_data={'pyqt_capturer.ico': ['cam.svg', 'capture.svg', 'settings.svg', 'video.svg']},
description='PyQt application which can capture/record certain area of screen',
url='https://github.com/yjg30737/pyqt-capturer.git',
install_requires=[
'PyQt5>=5.15.1',
'mss',
'opencv-python',
'numpy',
'pyqt-timer-label>=0.0.1',
'pyqt-transparent-centralwidget-window>=0.0.1',
'pyqt-svg-button>=0.0.1',
'pyqt-color-button @ git+https://git@github.com/yjg30737/pyqt-color-button.git@main',
'pyqt-color-picker>=0.0.1',
'pyqt-find-path-widget @ git+https://git@github.com/yjg30737/pyqt-find-path-widget.git@main'
]
) |
yjg30737/pyqt-capturer | pyqt_capturer/settingsDialog.py | from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QDialog, QFormLayout, QWidget, QPushButton, QVBoxLayout, QHBoxLayout, QGroupBox
from PyQt5.QtCore import Qt, QSettings
from pyqt_color_button import ColorButton
from pyqt_color_picker import ColorPickerDialog
from pyqt_find_path_widget import FindPathWidget
class SettingsDialog(QDialog):
def __init__(self):
super().__init__()
self.__initVal()
self.__initUi()
def __initVal(self):
self.__settingsStruct = QSettings('capturer.ini', QSettings.IniFormat)
self.__frameColor = self.__settingsStruct.value('frameColor', '#FFFFFF')
self.__savePath = self.__settingsStruct.value('savePath', '.')
def __initUi(self):
self.setWindowTitle('Settings')
self.setWindowFlags(Qt.WindowCloseButtonHint)
frameColor = QColor(self.__frameColor)
r, g, b = frameColor.red(), frameColor.green(), frameColor.blue()
self.__frameColorBtn = ColorButton(r=r, g=g, b=b)
self.__frameColorBtn.clicked.connect(self.__showFrameColorDialog)
colorGrpBox = QGroupBox()
colorGrpBox.setTitle('Color')
lay = QFormLayout()
lay.addRow('Frame Color', self.__frameColorBtn)
colorGrpBox.setLayout(lay)
savePathGrpBox = QGroupBox()
savePathGrpBox.setTitle('Save Path')
self.__findPathWidget = FindPathWidget(self.__savePath)
self.__findPathWidget.setAsDirectory(True)
lay = QHBoxLayout()
lay.addWidget(self.__findPathWidget)
savePathGrpBox.setLayout(lay)
lay = QVBoxLayout()
lay.addWidget(colorGrpBox)
lay.addWidget(savePathGrpBox)
lay.setContentsMargins(0, 0, 0, 5)
topWidget = QWidget()
topWidget.setLayout(lay)
okBtn = QPushButton('OK')
okBtn.clicked.connect(self.accept)
closeBtn = QPushButton('Close')
closeBtn.clicked.connect(self.close)
lay = QHBoxLayout()
lay.addWidget(okBtn)
lay.addWidget(closeBtn)
lay.setContentsMargins(0, 0, 0, 0)
bottomWidget = QWidget()
bottomWidget.setLayout(lay)
lay = QVBoxLayout()
lay.addWidget(topWidget)
lay.addWidget(bottomWidget)
self.setLayout(lay)
def __showFrameColorDialog(self):
dialog = ColorPickerDialog(self.__frameColor)
reply = dialog.exec()
if reply == QDialog.Accepted:
self.__frameColorBtn.setColor(dialog.getColor())
def getFrameColor(self):
return self.__frameColorBtn.getColor()
def getSavePath(self):
return self.__findPathWidget.getFileName() |
yjg30737/pyqt-capturer | pyqt_capturer/__init__.py | <reponame>yjg30737/pyqt-capturer
from .capturer import Capturer
from .settingsDialog import SettingsDialog |
yjg30737/pyqt-capturer | pyqt_capturer/capturer.py | import threading, time, os, cv2
import numpy as np
import mss
import mss.tools
from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtGui import QFont, QWindow
from PyQt5.QtWidgets import QMainWindow, QDialog
from pyqt_timer_label import TimerLabel
from pyqt_transparent_centralwidget_window import TransparentCentralWidgetWindow
from pyqt_svg_button import SvgButton
from pyqt_capturer.settingsDialog import SettingsDialog
class Capturer(TransparentCentralWidgetWindow):
def __init__(self):
main_window = QMainWindow()
super().__init__(main_window)
self.__initVal(main_window)
self.__initUi(main_window)
def __initVal(self, main_window):
self.__top = 0
self.__left = 0
self.__width = 0
self.__height = 0
self.__t = 0
self.__record_flag = False
self.__settingsStruct = QSettings('capturer.ini', QSettings.IniFormat)
self.__frameColor = self.__settingsStruct.value('frameColor', '#FFFFFF')
self.__savePath = self.__settingsStruct.value('savePath', os.getcwd())
def __initUi(self, main_window):
self.setMenuTitle(title='Capturer', icon_filename='ico/cam.svg')
self.setButtons()
cornerWidget = self.getCornerWidget()
lay = cornerWidget.layout()
recordBtn = SvgButton(self)
recordBtn.setShortcut('F6')
recordBtn.setIcon('ico/video.svg')
recordBtn.setCheckable(True)
recordBtn.toggled.connect(self.__recordToggled)
captureBtn = SvgButton(self)
captureBtn.setShortcut('F5')
captureBtn.setIcon('ico/capture.svg')
captureBtn.clicked.connect(self.__capture)
self.__timer_lbl = TimerLabel()
self.__timer_lbl.setFont(QFont('Arial', 12))
self.__timer_lbl.setTimerReverse(False)
settingsBtn = SvgButton(self)
settingsBtn.setIcon('ico/settings.svg')
settingsBtn.clicked.connect(self.__settings)
lay.insertWidget(0, settingsBtn)
lay.insertWidget(0, self.__timer_lbl)
lay.insertWidget(0, recordBtn)
lay.insertWidget(0, captureBtn)
lay.setAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.setFrameColor(self.__frameColor)
def __initScreenGeometry(self):
w = QWindow()
r = int(w.devicePixelRatio())
screen_g = self.window().geometry()
self.__top = (screen_g.top() + self.getInnerWidget().menuBar().height() + self._margin) * r
self.__left = (screen_g.left()+self._margin) * r
self.__width = (screen_g.width()-self._margin*2) * r
self.__height = (screen_g.height()-self.getInnerWidget().menuBar().height()-self._margin*2) * r
def __initRecordThread(self):
self.__t = threading.Thread(target=self.__recordThread,
args=(self.__top, self.__left, self.__width, self.__height, ))
def __recordThread(self, top, left, width, height):
with mss.mss() as sct:
# part of the screen
monitor = {'top': top, 'left': left, 'width': width, 'height': height}
# full screen
name = 'sample.mp4'
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
desired_fps = 30.0
out = cv2.VideoWriter(name, fourcc, desired_fps,
(monitor['width'], monitor['height']))
last_time = 0
while self.__record_flag:
img = sct.grab(monitor)
# cv2.imshow('test', np.array(img))
if time.time() - last_time > 1. / desired_fps:
last_time = time.time()
destRGB = cv2.cvtColor(np.array(img), cv2.COLOR_BGRA2BGR)
out.write(destRGB)
# if cv2.waitKey(25) & 0xFF == ord('q'):
# cv2.destroyAllWindows()
# break
cv2.destroyAllWindows()
def __recordToggled(self, f):
self.__initScreenGeometry()
self.__initRecordThread()
self.__record_flag = f
if f:
self.__timer_lbl.start()
self.__t.start()
else:
self.__timer_lbl.stop()
def __capture(self):
self.__initScreenGeometry()
with mss.mss() as sct:
output = os.path.join(self.__savePath, 'sample.png')
monitor = {'top': self.__top, 'left': self.__left, 'width': self.__width, 'height': self.__height}
sct_img = sct.grab(monitor)
mss.tools.to_png(sct_img.rgb, sct_img.size, output=output)
def __settings(self):
dialog = SettingsDialog()
reply = dialog.exec()
if reply == QDialog.Accepted:
color = dialog.getFrameColor()
savePath = dialog.getSavePath()
self.__settingsStruct.setValue('frameColor', color.name())
self.__settingsStruct.setValue('savePath', savePath)
self.setFrameColor(color)
def event(self, e):
if e.type() == 17:  # QEvent.Show
self.setFrameColor(self.__frameColor)
return super().event(e) |
valgur/GIMP-ML-Hub | plugins/deblur.py | <filename>plugins/deblur.py
#!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
from gimpfu import main
from _plugin_base import GimpPluginBase
class Deblur(GimpPluginBase):
def run(self):
self.model_file = 'DeblurGANv2.py'
result = self.predict(self.drawable)
self.create_layer(result)
plugin = Deblur()
plugin.register(
proc_name="deblur",
blurb="deblur",
help="Running deblurring.",
author="<NAME>",
copyright="",
date="2020",
label="deblur...",
imagetypes="RGB*"
)
main()
|
valgur/GIMP-ML-Hub | models/_util.py | <reponame>valgur/GIMP-ML-Hub
def to_rgb(image):
if len(image.shape) == 2:
image = image[:, :, None]
return image[:, :, (0, 0, 0)]
def apply_colormap(image, cmap='magma'):
# image must be in 0-1 range
import matplotlib.cm as cm
mapper = cm.ScalarMappable(norm=lambda x: x, cmap=cmap)
return mapper.to_rgba(image)[:, :, :3]
|
valgur/GIMP-ML-Hub | plugins/facegen.py | #!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
import gimpfu as gfu
from gimpfu import main
from _plugin_base import GimpPluginBase
class FaceGen(GimpPluginBase):
def run(self, img_layer, mask_layer, mask_m_layer):
self.model_file = 'MaskGAN.py'
result = self.predict(img_layer, mask_layer, mask_m_layer)
self.create_layer(result)
plugin = FaceGen()
plugin.register(
proc_name="facegen",
blurb="facegen",
help="Running face gen...",
author="<NAME>",
copyright="",
date="2020",
label="facegen...",
imagetypes="RGB*",
params=
[
(gfu.PF_LAYER, "drawinglayer", "Original Image:", None),
(gfu.PF_LAYER, "drawinglayer", "Original Mask:", None),
(gfu.PF_LAYER, "drawinglayer", "Modified Mask:", None),
]
)
main()
|
valgur/GIMP-ML-Hub | plugins/monocular_depth.py | <filename>plugins/monocular_depth.py
#!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
import gimpfu as gfu
from _plugin_base import GimpPluginBase
class DepthEstimation(GimpPluginBase):
model_options = [
("MiDaS (778 MB)", "MiDaS.py"),
("Monodepth2 (112 MB)", "Monodepth2.py"),
]
def run(self, model_idx, apply_colormap):
display_name, self.model_file = self.model_options[model_idx]
colormap = "magma" if apply_colormap else None
result = self.predict(self.drawable, colormap)
layer_name = self.drawable.name + " " + display_name.split()[0]
self.create_layer(result, name=layer_name)
plugin = DepthEstimation()
plugin.register(
proc_name="depth_estimation",
blurb="Monocular depth estimation",
help="Generate an inverse depth map based on deep learning.",
author="<NAME>",
copyright="",
date="2020",
label="Depth Estimation...",
imagetypes="RGB*",
params=[
(gfu.PF_OPTION, "Model", "Model", 0, [x[0] for x in DepthEstimation.model_options]),
(gfu.PF_TOGGLE, "Colormap", "Apply colormap", 0, True),
]
)
gfu.main()
|
valgur/GIMP-ML-Hub | plugins/super_resolution.py | #!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
from gimpfu import main, pdb
from _plugin_base import GimpPluginBase
class SuperResolution(GimpPluginBase):
def run(self):
self.model_file = 'SRResNet.py'
result = self.predict(self.drawable)
h, w, d = result.shape
self.gimp_img.resize(w, h, 0, 0)
self.create_layer(result)
plugin = SuperResolution()
plugin.register(
proc_name="super-resolution",
blurb="super-resolution",
help="Running super-resolution.",
author="<NAME>",
copyright="",
date="2020",
label="super-resolution...",
imagetypes="RGB*"
)
main()
|
valgur/GIMP-ML-Hub | models/Monodepth2.py | import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision import transforms
from _model_base import ModelBase, handle_alpha
from _util import to_rgb, apply_colormap
class Monodepth2(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = "valgur/monodepth2"
def load_model(self):
pretrained_model = "mono+stereo_640x192"
encoder = torch.hub.load(self.hub_repo, "ResnetEncoder", pretrained_model, map_location=self.device)
depth_decoder = torch.hub.load(self.hub_repo, "DepthDecoder", pretrained_model, map_location=self.device)
encoder.to(self.device)
depth_decoder.to(self.device)
return depth_decoder, encoder
@handle_alpha
@torch.no_grad()
def predict(self, input_image, colormap=None):
h, w, d = input_image.shape
assert d == 3, "Input image must be RGB"
# LOADING PRETRAINED MODEL
depth_decoder, encoder = self.model
input_image = Image.fromarray(input_image)
original_width, original_height = input_image.size
input_image = input_image.resize((encoder.feed_width, encoder.feed_height), Image.LANCZOS)
input_image = transforms.ToTensor()(input_image).unsqueeze(0)
input_image = input_image.to(self.device)
# PREDICTION
features = encoder(input_image)
outputs = depth_decoder(features)
disp = outputs[("disp", 0)]
disp = torch.nn.functional.interpolate(
disp, (original_height, original_width), mode="bilinear", align_corners=False)
disp = disp.squeeze().cpu().numpy()
disp /= disp.max()
if colormap:
out = apply_colormap(disp, colormap)
else:
out = to_rgb(disp)
return (out * 255).astype(np.uint8)
model = Monodepth2()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
|
valgur/GIMP-ML-Hub | tests/test.py | #!/usr/bin/python
import traceback
from gimpfu import pdb, gimp
import gimpfu as gfu
infile = "test.jpg"
outfile = "out.png"
try:
image = pdb.gimp_file_load(infile, infile, run_mode=gfu.RUN_NONINTERACTIVE)
# w = 200
# h = round(w * image.height / float(image.width))
# pdb.gimp_image_scale(image, w, h)
pdb.python_fu_super_resolution(image, image.active_layer)
image.flatten()
pdb.gimp_file_save(image, image.active_layer, outfile, outfile, run_mode=gfu.RUN_NONINTERACTIVE)
except:
gimp.message("ERROR:\n" + traceback.format_exc())
pdb.gimp_quit(1)
|
valgur/GIMP-ML-Hub | models/MiDaS.py | import sys
import numpy as np
import torch
import torch.hub
from PIL import Image
from torchvision.transforms import Compose
from _model_base import ModelBase, handle_alpha
from _util import apply_colormap, to_rgb
# Simplified transforms from
# https://github.com/intel-isl/MiDaS/blob/master/models/transforms.py
class Resize:
def __init__(self, width, height, image_interpolation_method=Image.BICUBIC):
self.__width = width
self.__height = height
self.__multiple_of = 32
self.__image_interpolation_method = image_interpolation_method
def constrain_to_multiple_of(self, x):
return (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
def get_size(self, width, height):
scale_height = self.__height / height
scale_width = self.__width / width
# scale such that output size is upper bound
if scale_width < scale_height:
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = self.constrain_to_multiple_of(scale_height * height)
new_width = self.constrain_to_multiple_of(scale_width * width)
return new_width, new_height
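# Illustrative calculation (not from the original source): with Resize(384, 384) as used in
# get_transform() below, a 1920x1080 input gives scale_width = 0.2 and scale_height ~= 0.356;
# the smaller scale is applied to both axes and rounded to multiples of 32, yielding 384x224.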
def __call__(self, image):
width, height = self.get_size(image.shape[1], image.shape[0])
resized = Image.fromarray(image).resize((width, height), self.__image_interpolation_method)
return np.array(resized)
class NormalizeImage:
def __init__(self, mean, std):
self.__mean = mean
self.__std = std
def __call__(self, image):
return (image - self.__mean) / self.__std
class PrepareForNet:
def __call__(self, image):
image = np.transpose(image, (2, 0, 1))
image = np.ascontiguousarray(image, dtype=np.float32)
tensor = torch.from_numpy(image)
return tensor.unsqueeze(0)
class MiDaS(ModelBase):
def __init__(self):
super().__init__()
self.hub_repo = "intel-isl/MiDaS"
def load_model(self):
model = torch.hub.load(self.hub_repo, "MiDaS", pretrained=True)
model.to(self.device)
model.eval()
return model
@staticmethod
def get_transform():
return Compose([
Resize(384, 384),
lambda x: x / 255.,
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
PrepareForNet()
])
@handle_alpha
@torch.no_grad()
def predict(self, input_image, colormap=None):
h, w, d = input_image.shape
assert d == 3, "Input image must be RGB"
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
transform = self.get_transform()
image_tensor = transform(input_image).to(self.device)
prediction = self.model.forward(image_tensor)
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=(h, w),
mode="bicubic",
align_corners=False,
)
disp = prediction.squeeze().cpu().numpy()
disp /= disp.max()
if colormap:
out = apply_colormap(disp, colormap)
else:
out = to_rgb(disp)
return (out * 255).astype(np.uint8)
model = MiDaS()
if __name__ == '__main__':
rpc_url = sys.argv[1]
model.process_rpc(rpc_url)
|
valgur/GIMP-ML-Hub | plugins/colorize.py | <reponame>valgur/GIMP-ML-Hub
#!/usr/bin/env python2
import sys
from os.path import dirname, realpath
sys.path.append(realpath(dirname(__file__)))
from gimpfu import main, pdb
import gimpfu as gfu
from _plugin_base import GimpPluginBase
class Colorize(GimpPluginBase):
def run(self):
self.model_file = 'NeuralColorization.py'
result = self.predict(self.drawable)
if self.gimp_img.base_type != gfu.RGB:
pdb.gimp_image_convert_rgb(self.gimp_img)
self.create_layer(result)
plugin = Colorize()
plugin.register(
proc_name="colorize",
blurb="colorize",
help="Colorize grayscale images",
author="<NAME>",
copyright="",
date="2020",
label="colorize...",
imagetypes="RGB*, GRAY*"
)
main()
|
chaveza6870/tsu | tsu/env_map.py | import os
from pathlib import Path
import attr
import consolejs
import tsu
from . import user_utils
from . import consts
#@autowire(UserUtils)
class EnvMap:
_ENV_CLEAN_BASE = {"ANDROID_DATA": "/data", "ANDROID_ROOT": "/system"}
_ENV_CLEAN_BASE_COPY = ["EXTERNAL_STORAGE", "LANG", "TERM"]
_ENV_CLEAN_OTHER = {"HOME": "/", "PATH": "/system/bin:/system/xbin"}
def __init__(self, prepend=False, clean=False, usern="root"):
self.prependpath = prepend
self.cleanenv = clean
self.usern = usern
@property
def shell(self):
return self._shell
@shell.setter
def shell(self, shell):
self._shell = shell
@property
def c_uid(self):
return self._cuid
@c_uid.setter
def c_uid(self, c_uid):
self._cuid = c_uid
self.is_other_user = user_utils.is_other_user(self.usern, self._cuid)
def get_env(self):
if self.is_other_user:
return self.clean_other()
pass
if self.cleanenv:
return self.clean_root
pass
def get_shell(self):
console = consolejs.get_console(tsu)
root_shell = consts.SYS_SHELL
USER_SHELL = Path(Path.home(), ".termux/shell")
BASH_SHELL = Path(consts.TERMUX_PREFIX, "bin/bash")
shell = self.shell
# Other users cannot access the Termux environment
if self.is_other_user:
shell = "system"
# The Android system shell.
if shell == "system":
root_shell = consts.SYS_SHELL
# Check if user has set a login shell
elif USER_SHELL.exists():
root_shell = str(USER_SHELL.resolve())
# Or at least installed bash
elif BASH_SHELL.exists():
root_shell = str(BASH_SHELL)
console.debug(r" {shell=} {self.is_other_user=} {root_shell=}")
return root_shell
@classmethod
def _merge_base(E):
env_b = E._ENV_CLEAN_BASE
env_bcp = {key: os.environ[key] for key in E._ENV_CLEAN_BASE_COPY}
return {**env_b, **env_bcp}
@property
def unclean_other(self):
env_copy = dict(os.environ)
return env_copy
def add_to_path(self, env_path, prep_path):
front = self.prependpath
sep = os.pathsep
new_path = (
f"{prep_path}{sep}{env_path}" if front else f"{env_path}{sep}{prep_path}"
)
return new_path
def clean_other(self):
E = EnvMap
environ = E._merge_base()
return {**environ, **E._ENV_CLEAN_OTHER}
@property
def clean_root(self):
E = EnvMap
environ = E._merge_base()
PREFIX = consts.TERMUX_PREFIX
PATH = self.add_to_path(
f"{PREFIX}/bin:${PREFIX}/bin/applets", consts.ANDROIDSYSTEM_PATHS
)
env_root = {
"HOME": "data/data/com.termux/files/home",
"PATH": PATH,
"PREFIX": f"{PREFIX}",
"TMPDIR": f"{PREFIX}/tmp",
}
environ = {**environ, **env_root}
return environ
|
chaveza6870/tsu | lint.py | import subprocess
args = [ 'black', 'tsu/', 'tests/' ]
subprocess.run(args)
|
chaveza6870/tsu | tests/test_env.py | <reponame>chaveza6870/tsu
import pytest
from unittest.mock import Mock, patch
from tsu.get_shell import GetShell
TERMUX_ID = 100972
class MockPath:
@staticmethod
def home():
return "/data/data/com.termux/files/home"
class TestTodos(object):
@classmethod
def setup_class(cls):
cls.mock_get_patcher = patch("project.services.requests.get")
cls.mock_get = cls.mock_get_patcher.start()
@classmethod
def teardown_class(cls):
cls.mock_get_patcher.stop()
def test_new_env(self):
shell = GetShell(None, 0, 100972).get()
#
assert shell == "/system/bin/sh"
|
chaveza6870/tsu | tsu/main.py | <gh_stars>1-10
# Copyright (c) 2019, <NAME> <<EMAIL>
# This software is licensed under the MIT License.
# https://github.com/cswl/tsu/blob/v3.x/LICENSE-MIT
import os
import sys
import os
import pwd
from pathlib import Path, PurePath
from docopt import docopt
import consolejs
import tsu
from tsu import consts
from tsu.defs import magisk, losu, chsu
from tsu.exec import SuExec
from tsu.env_map import EnvMap
## Optimization strips docstring in build
USAGE = """
tsu A su interface wrapper for Termux
Usage:
tsu
tsu [ -s SHELL ] [-pe] [USER]
tsu --debug [ -s SHELL ] [-pel] [USER]
tsu -h | --help | --version
Options:
-s <shell> Use an alternate specified shell.
-l Start a login shell.
-p Prepend system binaries to PATH
-e Start with a fresh environment.
--debug Output debugging information to stderr.
-h --help Show this screen.
--version Show version.
"""
def cli():
args = docopt(USAGE)
cur_uid = os.getuid()
### Debug handler
debug_enabled = args["--debug"]
# consolejs is a dynamic console logger for Python
if debug_enabled:
cjs = consolejs.create(tsu)
cjs.level = consolejs.DEBUG
else:
cjs = consolejs.disabled()
console = cjs.console
console.dir(args)
### End Debug handler
### Setup Shell and Environment
env_new = EnvMap(
prepend=(args.get("-p")), clean=(args.get("-e")), usern=(args.get("USER"))
)
env_new.c_uid = cur_uid
env_new.shell = args.get("-s")
env = env_new.get_env()
shell = env_new.get_shell()
# Check `su` binaries:
su_paths = [magisk, losu, chsu]
for su_path in su_paths:
su_bin = SuExec(su_path)
result = su_bin.vercmp()
if result == SuExec.FOUND:
su_bin.call_su( args.get("USER"), shell, env)
break
elif result == SuExec.UNSUPP:
print("Unsupported `su` found. ")
print("Pleae open an issue to add support")
break
elif result == SuExec.ABANDONED:
print(consts.CHSU_WARN)
break
# At this point. there is no `su` binary
print("No `su` binary not found.")
print("Are you rooted? ")
## Handler for direct call
if __name__ == "__main__":
cli()
|
chaveza6870/tsu | tsu/defs.py | <reponame>chaveza6870/tsu<gh_stars>1-10
from .SuBinary import SuBinary
magisk = SuBinary(
name="MAGISK",
argmap={"init": "su", "shell": "-s"},
verstring=r"MAGISKSU",
veropt=["su", "--version"],
path="/sbin/magisk",
)
losu = SuBinary(
name="MAGISK",
argmap={"shell": "-s"},
verstring=r"cm-su",
veropt=["--version"],
path="/system/xbin/su",
)
chsu = SuBinary(
name="CHSU",
argmap={"shell": "-s"},
verstring=r"SUPERSU",
veropt=["--version"],
multipath=["/su/bin/su", ("/sbin/su"), ("/system/xbin/su")],
path="",
)
|
chaveza6870/tsu | tsu/user_utils.py | <reponame>chaveza6870/tsu
from functools import lru_cache
from pwd import getpwnam
from pathlib import PurePath, Path
import consolejs
import tsu
@lru_cache(maxsize=4)
def is_other_user( user_n, uid):
console = consolejs.get_console(tsu)
if user_n == 0 or user_n == "root" or (not user_n):
return False
target_uid = getpwnam(user_n).pw_uid
is_other = (target_uid != 0) and (target_uid != uid)
console.debug(r" {user_n=} {uid=} {target_uid=} {is_other=}")
return is_other
def hist_file(shell):
shellname = PurePath(shell).name
histfile = Path.home() / f"{shellname}_history_root"
return str(histfile)
|
chaveza6870/tsu | tsu/exec.py | <gh_stars>1-10
# Copyright (c) 2019, <NAME> <<EMAIL>
# This software is licensed under the MIT License.
# https://github.com/cswl/tsu/blob/v3.x/LICENSE-MIT
import subprocess
import re
from pathlib import Path
from collections import deque
from . import consts
import consolejs
import tsu
class SuExec:
FOUND = 1
NONEXIST = -10
UNSUPP = -20
ABANDONED = -50
def __init__(self, su):
self.su = su
def argv_builder(self, su_path, shell, usern):
su = self.su
argv = deque([su.argmap["shell"], shell])
init = su.argmap.get("init", False)
if init:
argv.appendleft(init)
if usern:
argv.append(usern)
argv.appendleft(su_path)
to_list = list(argv)
return to_list
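# Illustrative examples derived from defs.py: for Magisk with shell "/system/bin/sh" and no
# target user this returns ["/sbin/magisk", "su", "-s", "/system/bin/sh"]; for LineageOS su
# (no "init" entry) with user "shell" it returns ["/system/xbin/su", "-s", "/system/bin/sh", "shell"].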
def call_su(self, usern, shell, env=None):
console = consolejs.get_console(tsu)
su = self.su
su_path = su.lpath()
argv = self.argv_builder(su_path, shell, usern)
console.debug("Calling {su_path=} with {usern=} {argv=} and with enviroment")
if env:
console.dir(env)
linux_execve(argv, env=env)
return True
def vercmp(self):
console = consolejs.get_console(tsu)
su = self.su
name = su.name
su_path = su.lpath()
checkver = [su_path] + su.veropt
if self.su.abandoned:
return SuExec.ABANDONED
try:
ver = subprocess.check_output(checkver).decode("utf-8")
console.debug(r" {name=} {ver=}")
found = SuExec.FOUND if su.verstring in ver else SuExec.UNSUPP
return found
except FileNotFoundError:
return SuExec.NONEXIST
except PermissionError:
return SuExec.NONEXIST
def linux_execve(args, env=None):
subprocess.run(args, env=env)
|
chaveza6870/tsu | tsu/SuBinary.py | from pathlib import Path
import attr
import typing
from . import consts
@attr.s(auto_attribs=True)
class SuBinary:
name: str
path: str
verstring: str
veropt: list
argmap: dict
multipath: typing.List[str] = None
abandoned : bool = None
def lpath(self):
if not self.multipath:
return Path(self.path)
|
chaveza6870/tsu | tsu/__init__.py | __version__ = "3.5.8"
import sys
import tsu.main
|
chaveza6870/tsu | setup_dist.py | <reponame>chaveza6870/tsu
import sys
from cx_Freeze import setup, Executable
with open('tsu/__init__.py', 'r') as f:
for line in f:
if line.startswith('__version__'):
version = line.strip().split('=')[1].strip(' \'"')
break
else:
version = '0.0.1'
options = {
'build_exe': {
'optimize' : 2,
'includes' : ['consolejs', 'docopt' ],
}
}
executables = [
Executable('tsu/main.py', targetName='tsu')
]
setup(name='tsuexec',
version=version,
description='A su interface wrapper for Termux',
executables=executables,
options=options
)
|
chaveza6870/tsu | tsu/consts.py | <reponame>chaveza6870/tsu
# Copyright (c) 2019, <NAME> <<EMAIL>
# This software is licensed under the MIT License.
# https://github.com/cswl/tsu/blob/v3.x/LICENSE-MIT
from pathlib import Path
from collections import OrderedDict
# Defaults in Termux and Android
TERMUX_FS = "/data/data/com.termux/files/"
TERMUX_PREFIX = f"{TERMUX_FS}/usr"
TERMUX_PATHS = f"{TERMUX_PREFIX}/bin:{TERMUX_PREFIX}/bin/applets"
ROOT_HOME = "/data/data/com.termux/files/root"
SYS_SHELL = "/system/bin/sh"
ANDROIDSYSTEM_PATHS = "/system/bin:/system/xbin"
## Location of su binaries.
ROOT_UID = 0
### Help texts
CHSU_WARN = "SuperSU is abandonware. Consider upgrading your SuperUser Application."
|
CyberDani/personal-roadmap | webPage/generator/unitTests/counter_test.py | import unittest
import sys
sys.path.append('..')
from modules import counter
class CounterTests(unittest.TestCase):
def test_initNonSense(self):
with self.assertRaises(Exception):
counter.SimpleCounter(-234)
with self.assertRaises(Exception):
counter.SimpleCounter()
with self.assertRaises(Exception):
counter.SimpleCounter(None)
with self.assertRaises(Exception):
counter.SimpleCounter(False)
with self.assertRaises(Exception):
counter.SimpleCounter("zero")
def test_getNextInt(self):
count = counter.SimpleCounter(4)
val = count.getNextInt()
self.assertEqual(val, 4)
self.assertEqual(count.getNextInt(), 5)
self.assertEqual(count.getNextInt(), 6)
self.assertEqual(count.getNextInt(), 7)
self.assertEqual(count.getNextInt(), 8)
self.assertEqual(count.getNextInt(), 9)
self.assertEqual(count.getNextInt(), 10)
val = count.getNextInt()
self.assertEqual(val, 11)
def test_getNextMessage(self):
count = counter.SimpleCounter(0)
with self.assertRaises(Exception):
count.getNextMessage(123)
strVal = count.getNextMessage("Hello")
self.assertEqual(strVal, "[0] Hello")
self.assertEqual(count.getNextMessage("Bye"), "[1] Bye")
self.assertEqual(count.getNextMessage("hey"), "[2] hey")
self.assertEqual(count.getNextMessage("just a sumple string"), "[3] just a sumple string")
def test_getNextIntAndMessage(self):
count = counter.SimpleCounter(55)
self.assertEqual(count.getNextMessage("random string here"), "[55] random string here")
self.assertEqual(count.getNextInt(), 56)
self.assertEqual(count.getNextInt(), 57)
self.assertEqual(count.getNextMessage("random string here"), "[58] random string here") |
CyberDani/personal-roadmap | webPage/generator/unitTests/checks_test.py | import unittest
import sys
sys.path.append('..')
from modules import checks
from modules import filerw
class ChecksTests(unittest.TestCase):
def test_checkIntIsBetween_raiseException(self):
with self.assertRaises(Exception):
checks.checkIntIsBetween(0, 1, 4)
with self.assertRaises(Exception):
checks.checkIntIsBetween(-20, 10, 55)
with self.assertRaises(Exception):
checks.checkIntIsBetween(20, -55, -15)
with self.assertRaises(Exception):
checks.checkIntIsBetween(-30, -15, -45)
with self.assertRaises(Exception):
checks.checkIntIsBetween(10, 2, 9)
with self.assertRaises(Exception):
checks.checkIntIsBetween(-120, 20, 90)
with self.assertRaises(Exception):
checks.checkIntIsBetween(100, 120, 90)
with self.assertRaises(Exception):
checks.checkIntIsBetween(100, 30, "hundred")
with self.assertRaises(Exception):
checks.checkIntIsBetween(100, [0], 1200)
with self.assertRaises(Exception):
checks.checkIntIsBetween("one", 0, 1200)
def test_checkIntIsBetween_notRaiseException(self):
try:
checks.checkIntIsBetween(0, 0, 1200)
checks.checkIntIsBetween(0, 0, 0)
checks.checkIntIsBetween(-2, -5, -1)
checks.checkIntIsBetween(20, 5, 103)
checks.checkIntIsBetween(20, 5, 20)
checks.checkIntIsBetween(5, 5, 20)
checks.checkIntIsBetween(15, 5, 20)
except Exception:
self.fail("checkIntIsBetween() raised Exception unexpectedly!")
def test_checkIfStringIsAlphaNumerical_invalid(self):
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("!")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical([])
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical(23)
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical(None)
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("ha-ha-ha")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("[something]")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("2+4")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("4's street")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("hey!")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("my text")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("nickname_12")
with self.assertRaises(Exception):
checks.checkIfStringIsAlphaNumerical("professional?")
def test_checkIfStringIsAlphaNumerical_valid(self):
try:
checks.checkIfStringIsAlphaNumerical("text")
checks.checkIfStringIsAlphaNumerical("simpleText")
checks.checkIfStringIsAlphaNumerical("2022")
checks.checkIfStringIsAlphaNumerical("2errors2fails")
checks.checkIfStringIsAlphaNumerical("good2go")
checks.checkIfStringIsAlphaNumerical("number1")
checks.checkIfStringIsAlphaNumerical("1dev4all100")
except Exception:
self.fail("checkIfStringIsAlphaNumerical() raised Exception unexpectedly!")
def test_checkIfFile_raiseException(self):
with self.assertRaises(Exception):
checks.checkIfFile(0)
with self.assertRaises(Exception):
checks.checkIfFile(None)
with self.assertRaises(Exception):
checks.checkIfFile("file.txt")
with self.assertRaises(Exception):
checks.checkIfFile([2, 3, 4])
with self.assertRaises(Exception):
checks.checkIfFile(True)
def test_checkIfFile_notRaiseException(self):
try:
fileWrite = open("unitTests/temp/test.txt", "w")
checks.checkIfFile(fileWrite)
fileWrite.close()
fileRead = open("unitTests/temp/test.txt", "r")
fileRead.close()
except Exception:
self.fail("checkIfFile() raised Exception unexpectedly!")
def test_checkIfList_raiseException(self):
with self.assertRaises(Exception):
checks.checkIfList(0)
with self.assertRaises(Exception):
checks.checkIfList(None)
with self.assertRaises(Exception):
checks.checkIfList(False)
with self.assertRaises(Exception):
checks.checkIfList("hey")
def test_checkIfList_notRaiseException(self):
try:
checks.checkIfList([0, 2, 4, 1, 0])
checks.checkIfList([0])
checks.checkIfList([])
checks.checkIfList(["hello", "world"])
checks.checkIfList([0, "world", False])
checks.checkIfList(["hey", None])
except Exception:
self.fail("checkIfList() raised Exception unexpectedly!")
def test_checkIfPureListOfStrings_raiseException(self):
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings(0)
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings(None)
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings(False)
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings("hey")
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings(["hello", "my", "world",12])
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings([True, "hello", "my", "world"])
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings(["hello", "my", ["one", "two"], "world"])
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings([True])
with self.assertRaises(Exception):
checks.checkIfPureListOfStrings([0, 1, 2, 3, 4, 5, 6])
def test_checkIfPureListOfStrings_notRaiseException(self):
try:
checks.checkIfPureListOfStrings([])
checks.checkIfPureListOfStrings([""])
checks.checkIfPureListOfStrings(["\t"])
checks.checkIfPureListOfStrings(["X"])
checks.checkIfPureListOfStrings(["\tHELLO\n"])
checks.checkIfPureListOfStrings(["one"])
checks.checkIfPureListOfStrings(["one", "two"])
checks.checkIfPureListOfStrings(["one", "two", "three"])
checks.checkIfPureListOfStrings(["one", "\t", "two", "three", "four", "five", "six", "seven", "\n"])
except Exception:
self.fail("checkIfPureListOfStrings() raised Exception unexpectedly!")
def test_checkIfString_raiseException(self):
with self.assertRaises(Exception):
checks.checkIfString(123,3, 10)
with self.assertRaises(Exception):
checks.checkIfString("hello", "empty", 10)
with self.assertRaises(Exception):
checks.checkIfString("hey", 1, None)
with self.assertRaises(Exception):
checks.checkIfString("hey", -3, 10)
with self.assertRaises(Exception):
checks.checkIfString("", -3, 10)
with self.assertRaises(Exception):
checks.checkIfString("hey", 20, 2)
with self.assertRaises(Exception):
checks.checkIfString("hey", -2, -1)
with self.assertRaises(Exception):
checks.checkIfString("hey", 5, 1500)
with self.assertRaises(Exception):
checks.checkIfString("", 1, 21)
with self.assertRaises(Exception):
checks.checkIfString("this string is intended to represent a longer one", 5, 15)
def test_checkIfString_notRaiseException(self):
try:
checks.checkIfString("hey", 3, 10)
checks.checkIfString("hey", 0, 3)
checks.checkIfString("", 0, 23)
checks.checkIfString("hello", 0, 12)
checks.checkIfString("hello", 3, 20)
except Exception:
self.fail("checkIfString() raised Exception unexpectedly!")
def test_checkIfAllNoneOrString_raiseException(self):
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString("not a list", 3, 10)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString([], 0, 10)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["hello", "hey", "hi"], 3, 10)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["", "hello"], 0, 2)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["heyho", "hello"], 0, 4)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["heyho", "hello"], 10, 22)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["hello"], 6, 6)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["hello", "bye", None], 0, 16)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString(["hello", None, "bye"], 0, 16)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString([None, "hello", "bye"], 0, 16)
with self.assertRaises(Exception):
checks.checkIfAllNoneOrString([None, "im a lonely string :(", None], 0, 16)
def test_checkIfAllNoneOrString_notRaiseException(self):
try:
checks.checkIfAllNoneOrString([None], 3, 10)
checks.checkIfAllNoneOrString([None, None], 0, 10)
checks.checkIfAllNoneOrString([None, None, None], 10, 100)
checks.checkIfAllNoneOrString([""], 0, 0)
checks.checkIfAllNoneOrString([""], 0, 10)
checks.checkIfAllNoneOrString(["hello"], 0, 10)
checks.checkIfAllNoneOrString(["hello", ""], 0, 10)
checks.checkIfAllNoneOrString(["hello", "hey"], 3, 5)
checks.checkIfAllNoneOrString(["hello", "hey", "hi", "k", ""], 0, 15)
except Exception:
self.fail("checkIfString() raised Exception unexpectedly!")
def test_checkIfValidJsonFile_nonSense(self):
with self.assertRaises(Exception):
checks.checkIfValidJsonFile("test.json")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(None)
with self.assertRaises(Exception):
checks.checkIfValidJsonFile("{}")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(False)
def test_checkIfValidJsonFile_invalid(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [""])
file = open("./unitTests/temp/test.txt", "r")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(file)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["hello internet"])
file = open("./unitTests/temp/test.txt", "r")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(file)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "true", "}"])
file = open("./unitTests/temp/test.txt", "r")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(file)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["1", "2", "3", "4"])
file = open("./unitTests/temp/test.txt", "r")
with self.assertRaises(Exception):
checks.checkIfValidJsonFile(file)
def test_checkIfValidJsonFile_valid(self):
try:
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "}"])
file = open("./unitTests/temp/test.txt", "r")
checks.checkIfValidJsonFile(file)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"numbers\": ", "[1,2,3,4,5]", "}"])
file = open("./unitTests/temp/test.txt", "r")
checks.checkIfValidJsonFile(file)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"boolValue\": ", "true", "}"])
file = open("./unitTests/temp/test.txt", "r")
checks.checkIfValidJsonFile(file)
except Exception:
self.fail("checkIfValidJsonFile() raised Exception unexpectedly!")
def test_checkIfValidJsonFileByFilePath_nonSense(self):
file = open("./unitTests/temp/test.txt", "r")
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath(file)
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/notExistingFile.extension")
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath(None)
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath(12)
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath(False)
def test_checkIfValidJsonFileByFilePath_invalid(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [""])
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["hello internet"])
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "true", "}"])
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["1", "2", "3", "4"])
with self.assertRaises(Exception):
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
def test_checkIfValidJsonFileByFilePath_valid(self):
try:
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "}"])
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"numbers\": ", "[1,2,3,4,5]", "}"])
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"boolValue\": ", "true", "}"])
checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
except Exception:
self.fail("checkIfValidJsonFileByFilePath() raised Exception unexpectedly!")
def test_checkIfValidJsonFileByFilePath_returnedJson(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"boolValue\": ", "true", "}"])
jsonVals = checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
self.assertEqual(jsonVals["boolValue"], True)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"intValue\": ", "23", "}"])
jsonVals = checks.checkIfValidJsonFileByFilePath("./unitTests/temp/test.txt")
self.assertEqual(jsonVals["intValue"], 23)
def test_checkIfValidJsonFile_returnedJson(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"boolValue\": ", "true", "}"])
file = open("./unitTests/temp/test.txt", "r")
jsonVals = checks.checkIfValidJsonFile(file)
self.assertEqual(jsonVals["boolValue"], True)
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["{", "\"intValue\": ", "23", "}"])
file = open("./unitTests/temp/test.txt", "r")
jsonVals = checks.checkIfValidJsonFile(file)
self.assertEqual(jsonVals["intValue"], 23)
|
CyberDani/personal-roadmap | webPage/generator/modules/htmlBody.py | from modules import checks
from modules import htmlBuilder
class HtmlBody:
def __init__(self, htmlFile, indentDepth):
checks.checkIntIsBetween(indentDepth, 1, 30)
checks.checkIfFile(htmlFile)
self.htmlFile = htmlFile
self.indentDepth = indentDepth
self.openedHtmlTags = []
def includeFileThenAppendNewLine(self, filePath):
htmlBuilder.includeFileThenAppendNewLine(self.htmlFile, filePath, self.indentDepth)
return self
def openHtmlTagThenAppendNewLine(self, htmlTag, options = ""):
tabs = htmlBuilder.getEscapedTabs(self.indentDepth)
openedHtmlTag = htmlBuilder.getOpenedHtmlTag(htmlTag, options)
self.htmlFile.write(tabs + openedHtmlTag + "\n")
self.openedHtmlTags.append(htmlTag)
self.indentDepth += 1
return self
def closeLastOpenedHtmlTag(self):
if len(self.openedHtmlTags) == 0:
raise Exception("There is not any opened html tag remained to closed")
self.indentDepth -= 1
tabs = htmlBuilder.getEscapedTabs(self.indentDepth)
lastTag = self.openedHtmlTags[-1]
closedHtmlTag = tabs + htmlBuilder.getClosedHtmlTag(lastTag)
self.htmlFile.write(closedHtmlTag + "\n")
del self.openedHtmlTags[-1]
return self
def addHtmlNewLineThenAppendNewLine(self, nrOfNewLines = 1):
htmlBuilder.addHtmlNewLineToFile(self.htmlFile, self.indentDepth, nrOfNewLines)
return self
def addJsScriptSrcThenAppendNewLine(self, url, integrity=None, crossorigin=None, referrerpolicy=None):
htmlBuilder.addJsScriptSrcToHtmlOutputFile(self.htmlFile, self.indentDepth, url,
integrity, crossorigin, referrerpolicy)
return self
def includeFileAsInlineJs(self, filePath):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(self.htmlFile, filePath,
"script", "", self.indentDepth)
return self
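To illustrate how the fluent interface above is meant to be chained, here is a small usage sketch; the output path, tag name and class attribute are invented for the example, and every call used is one of the HtmlBody methods defined above:
# hypothetical caller code for the HtmlBody builder
file = open("./temp/body_fragment.html", "w")
body = HtmlBody(file, indentDepth=2)
body.openHtmlTagThenAppendNewLine("div", "class='content'") \
    .addHtmlNewLineThenAppendNewLine(nrOfNewLines=2) \
    .closeLastOpenedHtmlTag()
file.close()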
|
CyberDani/personal-roadmap | webPage/generator/unitTests/htmlBuilder_test.py | import os
import sys
import unittest
sys.path.append('..')
from defTypes import buildType
from defTypes import dbBranchType
from defTypes import buildSettings
from modules import htmlBuilder
from modules import filerw
from modules import counter
def emptyHtmlHeadContent(settings):
a = 2
def emptyHtmlBodyContent(settings):
a = 2
def minimalistHtmlHeadContent(settings):
htmlTabs = htmlBuilder.getEscapedTabs(settings.indentDepth)
filerw.writeLinesToFileThenAppendNewLine(settings.htmlOutputFile, [htmlTabs + "<title>Hey!</title>"])
def minimalistHtmlBodyContent(settings):
htmlTabs = htmlBuilder.getEscapedTabs(settings.indentDepth)
filerw.writeLinesToFileThenAppendNewLine(settings.htmlOutputFile, [htmlTabs + "<h1>Hello!</h1>"])
class HtmlBuilderTests(unittest.TestCase):
def setUp(self):
if not os.path.exists('./unitTests/temp'):
os.makedirs('./unitTests/temp')
def test_writeIndexHtmlToFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
settings = buildSettings.BuildSettings(htmlOutputFile=file,
buildOption=buildType.BuildType.BUILD,
dbBranch=dbBranchType.DbBranchType.DEVEL,
stepsCounter=counter.SimpleCounter(1),
indentDepth=2)
with self.assertRaises(Exception):
htmlBuilder.buildIndexHtmlFile("hello", "hello", settings)
with self.assertRaises(Exception):
htmlBuilder.buildIndexHtmlFile(None, None, settings)
with self.assertRaises(Exception):
htmlBuilder.buildIndexHtmlFile(True, False, settings)
def test_writeIndexHtmlToFile_emptyHtml(self):
file = open("./unitTests/temp/test.txt", "w")
settings = buildSettings.BuildSettings(htmlOutputFile=file,
buildOption=buildType.BuildType.BUILD,
dbBranch=dbBranchType.DbBranchType.DEVEL,
stepsCounter=counter.SimpleCounter(1),
indentDepth=2)
htmlBuilder.buildIndexHtmlFile(emptyHtmlHeadContent, emptyHtmlBodyContent, settings)
file.close()
emptyHtmlLines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(emptyHtmlLines), 6)
self.assertEqual(emptyHtmlLines[0], "<html>")
self.assertEqual(emptyHtmlLines[1], "\t<head>")
self.assertEqual(emptyHtmlLines[2], "\t</head>")
self.assertEqual(emptyHtmlLines[3], "\t<body>")
self.assertEqual(emptyHtmlLines[4], "\t</body>")
self.assertEqual(emptyHtmlLines[5], "</html>")
def test_writeIndexHtmlToFile_minimalistHtml(self):
file = open("./unitTests/temp/test.txt", "w")
settings = buildSettings.BuildSettings(htmlOutputFile=file,
buildOption=buildType.BuildType.BUILD,
dbBranch=dbBranchType.DbBranchType.DEVEL,
stepsCounter=counter.SimpleCounter(1),
indentDepth=2)
htmlBuilder.buildIndexHtmlFile(minimalistHtmlHeadContent, minimalistHtmlBodyContent, settings)
file.close()
emptyHtmlLines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(emptyHtmlLines), 8)
self.assertEqual(emptyHtmlLines[0], "<html>")
self.assertEqual(emptyHtmlLines[1], "\t<head>")
self.assertEqual(emptyHtmlLines[2], "\t\t<title>Hey!</title>")
self.assertEqual(emptyHtmlLines[3], "\t</head>")
self.assertEqual(emptyHtmlLines[4], "\t<body>")
self.assertEqual(emptyHtmlLines[5], "\t\t<h1>Hello!</h1>")
self.assertEqual(emptyHtmlLines[6], "\t</body>")
self.assertEqual(emptyHtmlLines[7], "</html>")
def test_getEscapedTabs_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(0)
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(-1)
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(-10)
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(124)
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs('hello')
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(False)
with self.assertRaises(Exception):
htmlBuilder.getEscapedTabs(None)
def test_getEscapedTabs_examples(self):
self.assertEqual(htmlBuilder.getEscapedTabs(1), '\t')
self.assertEqual(htmlBuilder.getEscapedTabs(2), '\t\t')
self.assertEqual(htmlBuilder.getEscapedTabs(3), '\t\t\t')
self.assertEqual(htmlBuilder.getEscapedTabs(4), '\t\t\t\t')
self.assertEqual(htmlBuilder.getEscapedTabs(5), '\t\t\t\t\t')
self.assertEqual(htmlBuilder.getEscapedTabs(10), '\t\t\t\t\t\t\t\t\t\t')
def test_getHtmlNewLines_nonsense(self):
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = "two", nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 2, nrOfNewLines = "one")
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = -2, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 0, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 100, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = -1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 100)
with self.assertRaises(Exception):
htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 0)
def test_getHtmlNewLines_defaultParameter_nrOfNewLines_1(self):
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 1)
self.assertEqual(newLines, "\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 2)
self.assertEqual(newLines, "\t\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 3)
self.assertEqual(newLines, "\t\t\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 6)
self.assertEqual(newLines, "\t\t\t\t\t\t<br\\>")
def test_getHtmlNewLines_normalCases(self):
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 1)
self.assertEqual(newLines, "\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 2, nrOfNewLines = 1)
self.assertEqual(newLines, "\t\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 4, nrOfNewLines = 1)
self.assertEqual(newLines, "\t\t\t\t<br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 2)
self.assertEqual(newLines, "\t<br\\> <br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 3)
self.assertEqual(newLines, "\t<br\\> <br\\> <br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 1, nrOfNewLines = 6)
self.assertEqual(newLines, "\t<br\\> <br\\> <br\\> <br\\> <br\\> <br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 2, nrOfNewLines = 2)
self.assertEqual(newLines, "\t\t<br\\> <br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 2, nrOfNewLines = 4)
self.assertEqual(newLines, "\t\t<br\\> <br\\> <br\\> <br\\>")
newLines = htmlBuilder.getHtmlNewLines(indentDepth = 4, nrOfNewLines = 2)
self.assertEqual(newLines, "\t\t\t\t<br\\> <br\\>")
def test_addNewLineToHtmlOutputFile_nonsense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile("heyho", indentDepth = 2, nrOfNewLines = 2)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth ="two", nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 2, nrOfNewLines ="one")
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = -2, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 0, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 100, nrOfNewLines = 1)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 1, nrOfNewLines = -1)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 1, nrOfNewLines = 100)
with self.assertRaises(Exception):
htmlBuilder.addHtmlNewLineToFile(file, indentDepth = 1, nrOfNewLines = 0)
file.close()
def test_addNewLineToHtmlOutputFile_defaultParameter_nrOfNewLines_1(self):
for indent in range(1, 6):
newLines = htmlBuilder.getHtmlNewLines(indent)
file = open("./unitTests/temp/test.txt", "w")
htmlBuilder.addHtmlNewLineToFile(file, indent)
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], newLines + "\n")
def test_addFaviconToHtmlOutputFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, "favicon.png", -1)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, "/img/fav.ico", None)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, "./images/f.png", True)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, None, 2)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, 34, 2)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, False, 2)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile(file, "", 2)
with self.assertRaises(Exception):
htmlBuilder.addFaviconToHtmlOutputFile("./unitTests/temp/test.txt", "myFavicon.ico", 2)
def test_addFaviconToHtmlOutputFile_examples(self):
for indent in [3, 4, 5]:
for favicon in ["fav.png", "./media/img/icon.ico", "../../myFavIcon.jpg"]:
file = open("./unitTests/temp/test.txt", "w")
htmlBuilder.addFaviconToHtmlOutputFile(file, favicon, indent)
file.close()
line = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(line), 1)
self.assertEqual(line[0], htmlBuilder.getHtmlFavicon(favicon, indent))
def test_addTitleToHtmlOutputFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, "title", -1)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, "title", None)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, "title", True)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, None, 2)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, 34, 2)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, False, 2)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile(file, "", 2)
with self.assertRaises(Exception):
htmlBuilder.addTitleToHtmlOutputFile("./unitTests/temp/test.txt", "title", 2)
def test_addTitleToHtmlOutputFile_examples(self):
for indent in [2, 3, 4]:
for title in ["title", "my page", "Look At This 23!#"]:
file = open("./unitTests/temp/test.txt", "w")
htmlBuilder.addTitleToHtmlOutputFile(file, title, indent)
file.close()
line = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(line), 1)
self.assertEqual(line[0], htmlBuilder.getHtmlTitle(title, indent))
def test_addMetaScreenOptimizedForMobileToHtmlOutputFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBuilder.addMetaScreenOptimizedForMobileToHtmlOutputFile(file, -3)
with self.assertRaises(Exception):
htmlBuilder.addMetaScreenOptimizedForMobileToHtmlOutputFile(file, "2")
with self.assertRaises(Exception):
htmlBuilder.addMetaScreenOptimizedForMobileToHtmlOutputFile("./unitTests/temp/test.txt", 2)
def test_addMetaScreenOptimizedForMobileToHtmlOutputFile_examples(self):
for indent in range(1, 5):
file = open("./unitTests/temp/test.txt", "w")
htmlBuilder.addMetaScreenOptimizedForMobileToHtmlOutputFile(file, indent)
file.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], htmlBuilder.getMetaScreenOptimizedForMobile(indent))
def test_getCssLinkHref_nonsense(self):
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(indentDepth=-3, url="www.mysite.com/res.css",
integrity=None, crossorigin=None, referrerpolicy=None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(2, False, None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(2, "", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "hello", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "sha215-23", None, None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", None, "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", None, None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", None, "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "sha512-23", None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "sha512-23", "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "a", "x", "z")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "abc", "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "sha512-asdasdc-xcx", "abc", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", "sha512-asdasdc-xcx", "anonymous", "ab")
def test_getCssLinkHref_justUrl(self):
result = htmlBuilder.getCssLinkHref(1, "www.mysite.com/res.css", None, None, None)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\t<link href=\"www.mysite.com/res.css\" rel=\"stylesheet\" />")
result = htmlBuilder.getCssLinkHref(2, "https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/css/materialize.min.css", None, None, None)
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "\t\t<link href=\"https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/css/materialize.min.css\"")
self.assertEqual(result[1], "\t\t\trel=\"stylesheet\" />")
def test_getCssLinkHref_containsIntegrity(self):
result = htmlBuilder.getCssLinkHref(3, "https://www.randomsite.com/resource.css",
"asdsadbsdsadbi32gr3ur", "techguy", "refferrer")
self.assertEqual(len(result), 3)
self.assertEqual(result[0], "\t\t\t<link href=\"https://www.randomsite.com/resource.css\"")
self.assertEqual(result[1], "\t\t\t\tintegrity=\"asdsadbsdsadbi32gr3ur\"")
self.assertEqual(result[2], "\t\t\t\trel=\"stylesheet\" crossorigin=\"techguy\" referrerpolicy=\"refferrer\" />")
def test_addCssLinkHrefToHtmlOutputFile_nonsense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(htmlFile=file, indentDepth=-3, url="www.mysite.com/res.css",
integrity=None, crossorigin=None, referrerpolicy=None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile("file.html", 2, "https://site.com/random.css", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 2, False, None, None, None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 2, "", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "hello", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "sha215-23", None, None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", None, "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", None, None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", None, "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "sha512-23", None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "sha512-23", "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "a", "x", "z")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "abc", "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "sha512-asdasdc-xcx", "abc", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, 1, "www.mysite.com/res.css", "sha512-asdasdc-xcx", "anonymous", "ab")
file.close()
def test_addCssLinkHrefToHtmlOutputFile_normalCases(self):
self.cssLinkHrefTestHelper(1, "www.mysite.com/res.css", None, None, None)
self.cssLinkHrefTestHelper(5, "https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/css/materialize.min.css",
None, None, None)
self.cssLinkHrefTestHelper(3, "https://www.randomsite.com/resource.css", "asdsadbsdsadbi32gr3ur", "techguy", "refferrer")
def cssLinkHrefTestHelper(self, indentDepth, url, integrity, crossorigin, referrerpolicy):
file = open("./unitTests/temp/test.txt", "w")
lines = htmlBuilder.getCssLinkHref(indentDepth, url, integrity, crossorigin, referrerpolicy)
htmlBuilder.addCssLinkHrefToHtmlOutputFile(file, indentDepth, url, integrity, crossorigin, referrerpolicy)
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), len(lines))
for i in range(len(readLines)):
self.assertEqual(readLines[i], lines[i] + "\n")
def test_getHtmlFavicon_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon("favicon.ico", -1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon("fav.png", None)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon("images/favicon.ico", True)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon(None, 1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon(34, 2)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon(False, 3)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon("", 4)
with self.assertRaises(Exception):
htmlBuilder.getHtmlFavicon("X", 5)
def test_getHtmlFavicon_examples(self):
self.assertEqual(htmlBuilder.getHtmlFavicon("favicon.png", 1), "\t<link rel=\"icon\" href=\"favicon.png\">")
self.assertEqual(htmlBuilder.getHtmlFavicon("/images/fav.ico", 2),
"\t\t<link rel=\"icon\" href=\"/images/fav.ico\">")
self.assertEqual(htmlBuilder.getHtmlFavicon("../media/img/F.ico", 3),
"\t\t\t<link rel=\"icon\" href=\"../media/img/F.ico\">")
def test_getHtmlTitle_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle("title", -1)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle("title", None)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle("title", True)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle(None, 2)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle(34, 2)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle(False, 2)
with self.assertRaises(Exception):
htmlBuilder.getHtmlTitle("", 2)
def test_getHtmlTitle_examples(self):
self.assertEqual(htmlBuilder.getHtmlTitle("Title", 2), "\t\t<title>Title</title>")
self.assertEqual(htmlBuilder.getHtmlTitle("My pagE", 3), "\t\t\t<title>My pagE</title>")
self.assertEqual(htmlBuilder.getHtmlTitle("awesome title here", 4), "\t\t\t\t<title>awesome title here</title>")
def test_getJsScriptSrc_nonsense(self):
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(indentDepth=-3, url="www.mysite.com/res.js",
integrity=None, crossorigin=None, referrerpolicy=None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(2, False, None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(2, "", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "hello", None, None, None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "sha215-23", None, None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", None, "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", None, None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", None, "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "sha512-23", None, "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "sha512-23", "anonymous", None)
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "a", "x", "z")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "abc", "anonymous", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "sha512-asdasdc-xcx", "abc", "no-refferer")
with self.assertRaises(Exception):
htmlBuilder.getJsScriptSrc(1, "www.mysite.com/res.js", "sha512-asdasdc-xcx", "anonymous", "ab")
def test_getJsScriptSrc_justUrl(self):
result = htmlBuilder.getJsScriptSrc(1, "https://randomsite.com/myscript.js", None, None, None)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\t<script src=\"https://randomsite.com/myscript.js\"></script>")
result = htmlBuilder.getJsScriptSrc(2, "https://code.jquery.com/jquery-3.6.0.min.js", None, None, None)
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\t\t<script src=\"https://code.jquery.com/jquery-3.6.0.min.js\"></script>")
def test_getJsScriptSrc_containsIntegrity(self):
result = htmlBuilder.getJsScriptSrc(3, "https://randomsite.com/mySuperScript.js",
"sha512-adasdbidbeiebewiwbf==", "theGeek", "no-refferrer")
self.assertEqual(len(result), 3)
self.assertEqual(result[0], "\t\t\t<script src=\"https://randomsite.com/mySuperScript.js\"")
self.assertEqual(result[1], "\t\t\t\tintegrity=\"sha512-adasdbidbeiebewiwbf==\"")
self.assertEqual(result[2], "\t\t\t\tcrossorigin=\"theGeek\" referrerpolicy=\"no-refferrer\"></script>")
def test_addJsScriptSrcToHtmlOutputFile_normalCases(self):
self.jsScriptSrcTestHelper(1, "www.myAwesomeSite.com/script.js", None, None, None)
self.jsScriptSrcTestHelper(5, "https://cdn.jsdelivr.net/npm/gasparesganga-jquery-loading-overlay@2.1.7/dist/loadingoverlay.min.js",
None, None, None)
self.jsScriptSrcTestHelper(3, "https://www.randomsite.com/resource.js", "asfldfohsdofsdflndjfbfd", "TechGuy", "refferrer")
def jsScriptSrcTestHelper(self, indentDepth, url, integrity, crossorigin, referrerpolicy):
file = open("./unitTests/temp/test.txt", "w")
lines = htmlBuilder.getJsScriptSrc(indentDepth, url, integrity, crossorigin, referrerpolicy)
htmlBuilder.addJsScriptSrcToHtmlOutputFile(file, indentDepth, url, integrity, crossorigin, referrerpolicy)
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), len(lines))
for i in range(len(readLines)):
self.assertEqual(readLines[i], lines[i] + "\n")
def test_getMetaScreenOptimizedForMobile_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile(-1)
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile(None)
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile(False)
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile("zero")
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile("")
with self.assertRaises(Exception):
htmlBuilder.getMetaScreenOptimizedForMobile([])
def test_getMetaScreenOptimizedForMobile_examples(self):
self.assertEqual(htmlBuilder.getMetaScreenOptimizedForMobile(1),
"\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>")
self.assertEqual(htmlBuilder.getMetaScreenOptimizedForMobile(2),
"\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>")
self.assertEqual(htmlBuilder.getMetaScreenOptimizedForMobile(3),
"\t\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>")
self.assertEqual(htmlBuilder.getMetaScreenOptimizedForMobile(4),
"\t\t\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>")
self.assertEqual(htmlBuilder.getMetaScreenOptimizedForMobile(5),
"\t\t\t\t\t<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>")
def test_includeFileSurroundedByHtmlTagToHtmlOutputFile_nonSense(self):
dest = open("./unitTests/temp/test.txt", "w")
src = open("./unitTests/temp/test2.txt", "w")
src.close()
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "div", "", -1)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, src, "div", "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, None, "div", "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(None, None, "div", "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(None, "./unitTests/temp/test2.txt", "div", "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "div", "", False)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "div", "", None)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(None, None, "div", "", None)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "", "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "div", None, 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "span", 12, 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", 22, "", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", None, "", 2)
def test_includeFileSurroundedByHtmlTagToHtmlOutputFile_emptyFile(self):
src = open("./unitTests/temp/test2.txt", "w")
src.close()
dest = open("./unitTests/temp/test.txt", "w")
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "a", "href='url.com'", 1)
dest.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "\t<a href='url.com'>")
self.assertEqual(lines[1], "\t</a>")
def test_includeFileSurroundedByHtmlTagToHtmlOutputFile_emptyFile_2(self):
src = open("./unitTests/temp/test2.txt", "w")
src.close()
dest = open("./unitTests/temp/test.txt", "w")
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", "div", "", 2)
dest.close()
lines = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "\t\t<div>")
self.assertEqual(lines[1], "\t\t</div>")
def test_includeFileSurroundedByHtmlTagToHtmlOutputFile_examples(self):
lines = self.helper_includeFileSurroundedByHtmlTagToHtmlOutputFile(1, ["be proud of yourself"],
"span", "class='myClass'")
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0], "\t<span class='myClass'>")
self.assertEqual(lines[1], "\t\tbe proud of yourself")
self.assertEqual(lines[2], "\t</span>")
lines = self.helper_includeFileSurroundedByHtmlTagToHtmlOutputFile(2, ["<div>", "\thooray!", "</div>"],
"footer", "")
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], "\t\t<footer>")
self.assertEqual(lines[1], "\t\t\t<div>")
self.assertEqual(lines[2], "\t\t\t\thooray!")
self.assertEqual(lines[3], "\t\t\t</div>")
self.assertEqual(lines[4], "\t\t</footer>")
def test_includeFileSurroundedByHtmlTagToHtmlOutputFile_examples_2(self):
lines = self.helper_includeFileSurroundedByHtmlTagToHtmlOutputFile_2(3, ["be proud of yourself", "find a meaning"],
"span", "class='myClass'")
self.assertEqual(len(lines), 7)
self.assertEqual(lines[0], "line 1")
self.assertEqual(lines[1], "\tline 2")
self.assertEqual(lines[2], "\t\t\tline 3")
self.assertEqual(lines[3], "\t\t\t<span class='myClass'>")
self.assertEqual(lines[4], "\t\t\t\tbe proud of yourself")
self.assertEqual(lines[5], "\t\t\t\tfind a meaning")
self.assertEqual(lines[6], "\t\t\t</span>")
lines = self.helper_includeFileSurroundedByHtmlTagToHtmlOutputFile_2(2, ["<div>", "\thooray!", "</div>"],
"footer", "")
self.assertEqual(len(lines), 8)
self.assertEqual(lines[0], "line 1")
self.assertEqual(lines[1], "\tline 2")
self.assertEqual(lines[2], "\t\t\tline 3")
self.assertEqual(lines[3], "\t\t<footer>")
self.assertEqual(lines[4], "\t\t\t<div>")
self.assertEqual(lines[5], "\t\t\t\thooray!")
self.assertEqual(lines[6], "\t\t\t</div>")
self.assertEqual(lines[7], "\t\t</footer>")
def helper_includeFileSurroundedByHtmlTagToHtmlOutputFile_2(self, indent, lines, htmlTag, htmlTagOption):
src = open("./unitTests/temp/test2.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(src, lines)
src.close()
dest = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(dest, ["line 1", "\tline 2", "\t\t\tline 3"])
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", htmlTag,
htmlTagOption, indent)
dest.close()
return filerw.getLinesByFilePath("./unitTests/temp/test.txt")
def helper_includeFileSurroundedByHtmlTagToHtmlOutputFile(self, indent, lines, htmlTag, htmlTagOption):
src = open("./unitTests/temp/test2.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(src, lines)
src.close()
dest = open("./unitTests/temp/test.txt", "w")
htmlBuilder.includeFileSurroundedByHtmlTagThenAppendNewLine(dest, "./unitTests/temp/test2.txt", htmlTag,
htmlTagOption, indent)
dest.close()
return filerw.getLinesByFilePath("./unitTests/temp/test.txt")
def test_includeFileToHtmlOutputFile_nonSense(self):
dest = open("./unitTests/temp/test.txt", "w")
src = open("./unitTests/temp/test2.txt", "w")
src.close()
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", -1)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(dest, None, 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(None, None, 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(None, "./unitTests/temp/test2.txt", 2)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", False)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", None)
with self.assertRaises(Exception):
htmlBuilder.includeFileThenAppendNewLine(None, None, None)
def test_includeFileToHtmlOutputFile_emptyFile(self):
src = open("./unitTests/temp/test2.txt", "w")
src.close()
dest = open("./unitTests/temp/test.txt", "w")
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", 1)
dest.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], "\n")
def test_includeFileToHtmlOutputFile_normalCases(self):
lines = self.helper_includeFileToHtmlOutputFile(1, ["be proud of yourself"])
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], "\tbe proud of yourself\n")
self.assertEqual(lines[1], "\n")
lines = self.helper_includeFileToHtmlOutputFile(2, ["<div>", "\thooray!", "</div>"])
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], "\t\t<div>\n")
self.assertEqual(lines[1], "\t\t\thooray!\n")
self.assertEqual(lines[2], "\t\t</div>\n")
self.assertEqual(lines[3], "\n")
def test_includeFileToHtmlOutputFile_normalCases_2(self):
lines = self.helper_includeFileToHtmlOutputFile_2(1, ["be proud of yourself"])
self.assertEqual(len(lines), 5)
self.assertEqual(lines[0], "> first line\n")
self.assertEqual(lines[1], ">> second line\n")
self.assertEqual(lines[2], ">>> third line\n")
self.assertEqual(lines[3], "\tbe proud of yourself\n")
self.assertEqual(lines[4], "\n")
lines = self.helper_includeFileToHtmlOutputFile_2(2, ["<div>", "\thooray!", "</div>"])
self.assertEqual(len(lines), 7)
self.assertEqual(lines[0], "> first line\n")
self.assertEqual(lines[1], ">> second line\n")
self.assertEqual(lines[2], ">>> third line\n")
self.assertEqual(lines[3], "\t\t<div>\n")
self.assertEqual(lines[4], "\t\t\thooray!\n")
self.assertEqual(lines[5], "\t\t</div>\n")
self.assertEqual(lines[6], "\n")
def helper_includeFileToHtmlOutputFile_2(self, indent, lines):
src = open("./unitTests/temp/test2.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(src, lines)
src.close()
dest = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(dest, ["> first line", ">> second line", ">>> third line"])
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", indent)
dest.close()
return filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
def helper_includeFileToHtmlOutputFile(self, indent, lines):
src = open("./unitTests/temp/test2.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(src, lines)
src.close()
dest = open("./unitTests/temp/test.txt", "w")
htmlBuilder.includeFileThenAppendNewLine(dest, "./unitTests/temp/test2.txt", indent)
dest.close()
return filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
def test_getOpenedHtmlTag_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("<div")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("<div>")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("/div")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("div\nspan")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("ul selected")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("", "focused")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag(12)
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag(2, "option2")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag(None)
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag(None, "selected")
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("abc", None)
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("abc", 12)
with self.assertRaises(Exception):
htmlBuilder.getOpenedHtmlTag("abc", False)
def test_getOpenedHtmlTag_examples(self):
self.assertEqual(htmlBuilder.getOpenedHtmlTag("b"), "<b>")
self.assertEqual(htmlBuilder.getOpenedHtmlTag("div"), "<div>")
self.assertEqual(htmlBuilder.getOpenedHtmlTag("footer"), "<footer>")
self.assertEqual(htmlBuilder.getOpenedHtmlTag("ul", "selected"), "<ul selected>")
self.assertEqual(htmlBuilder.getOpenedHtmlTag("a", "href='webpage.com'"), "<a href='webpage.com'>")
self.assertEqual(htmlBuilder.getOpenedHtmlTag("a", "href='webpage.com' class='new-link'"),
"<a href='webpage.com' class='new-link'>")
def test_getClosedHtmlTag_nonSense(self):
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("")
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag(12)
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag(None)
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("<div")
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("<div>")
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("/div")
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("div\nspan")
with self.assertRaises(Exception):
htmlBuilder.getClosedHtmlTag("ul selected")
def test_getClosedHtmlTag_examples(self):
self.assertEqual(htmlBuilder.getClosedHtmlTag("b"), "</b>")
self.assertEqual(htmlBuilder.getClosedHtmlTag("style"), "</style>")
self.assertEqual(htmlBuilder.getClosedHtmlTag("script"), "</script>")
self.assertEqual(htmlBuilder.getClosedHtmlTag("navbar"), "</navbar>")
|
CyberDani/personal-roadmap | webPage/generator/unitTests/argumentParser_test.py | import unittest
import sys
sys.path.append('..')
from defTypes import dbBranchType
from defTypes import buildType
from modules import argumentParser
from modules import checks
from modules import db
class ArgumentParserTests(unittest.TestCase):
def test_nonSense(self):
with self.assertRaises(Exception):
argumentParser.parseArguments(["hello", False])
with self.assertRaises(Exception):
argumentParser.parseArguments([False, False])
with self.assertRaises(Exception):
argumentParser.parseArguments(["hello", "arg2", None, "arg4"])
with self.assertRaises(Exception):
argumentParser.parseArguments([12, "one", "two", "three"])
with self.assertRaises(Exception):
argumentParser.parseArguments("-b -a")
def test_noArgument(self):
args = []
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertTrue(invalidUsage)
self.assertFalse(runUnitTests)
self.assertFalse(backup)
self.assertEqual(buildOption, buildType.BuildType.DO_NOT_BUILD)
# not interested in actual value of dbBranch in this case
self.assertTrue(dbBranch == dbBranchType.DbBranchType.MASTER or dbBranch == dbBranchType.DbBranchType.DEVEL)
def test_unitTestSingleArgument(self):
args = ['-u']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertFalse(backup)
self.assertEqual(buildOption, buildType.BuildType.DO_NOT_BUILD)
self.assertEqual(dbBranch, db.getCurrentDbBranch())
def test_unitTest_dbMaster(self):
args = ['-u', 'db:master']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertFalse(backup)
self.assertEqual(buildOption, buildType.BuildType.DO_NOT_BUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.MASTER)
def test_unitTest_dbDevel(self):
args = ['-u', 'db:devel']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertFalse(backup)
self.assertEqual(buildOption, buildType.BuildType.DO_NOT_BUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.DEVEL)
def test_build(self):
args = ['-b']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.BUILD)
self.assertEqual(dbBranch, db.getCurrentDbBranch())
def test_build_dbMaster(self):
args = ['-b', 'db:master']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.BUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.MASTER)
def test_build_dbDevel(self):
args = ['-b', 'db:devel']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.BUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.DEVEL)
def test_rebuild(self):
args = ['-rb']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.REBUILD)
self.assertEqual(dbBranch, db.getCurrentDbBranch())
def test_rebuild_dbMaster(self):
args = ['-rb', 'db:master']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.REBUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.MASTER)
def test_rebuild_dbDevel(self):
args = ['-rb', 'db:devel']
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertFalse(invalidUsage)
self.assertTrue(runUnitTests)
self.assertTrue(backup)
self.assertEqual(buildOption, buildType.BuildType.REBUILD)
self.assertEqual(dbBranch, dbBranchType.DbBranchType.DEVEL)
def invalidArgumentCheck(self, args):
invalidUsage, runUnitTests, backup, buildOption, dbBranch = argumentParser.parseArguments(args)
self.assertTrue(invalidUsage)
self.assertFalse(runUnitTests)
self.assertFalse(backup)
self.assertEqual(buildOption, buildType.BuildType.DO_NOT_BUILD)
# not interested in actual value of dbBranch in this case
self.assertTrue(dbBranch == dbBranchType.DbBranchType.MASTER or dbBranch == dbBranchType.DbBranchType.DEVEL)
def test_invalidArguments(self):
self.invalidArgumentCheck(['-A'])
self.invalidArgumentCheck(['-B'])
self.invalidArgumentCheck(['-U'])
self.invalidArgumentCheck(['-U', 'db:master'])
self.invalidArgumentCheck(['b'])
self.invalidArgumentCheck(['a'])
self.invalidArgumentCheck(['a', 'db:devel'])
self.invalidArgumentCheck(['-a'])
self.invalidArgumentCheck(['u'])
self.invalidArgumentCheck(['au'])
self.invalidArgumentCheck(['ua'])
self.invalidArgumentCheck(['-ua'])
self.invalidArgumentCheck(['-au'])
self.invalidArgumentCheck(['-a', '-u'])
self.invalidArgumentCheck(['-b', '-u'])
self.invalidArgumentCheck(['-a', '-x'])
self.invalidArgumentCheck(['-b', '-x'])
self.invalidArgumentCheck(['-u', '-a'])
self.invalidArgumentCheck(['-u', '-b'])
self.invalidArgumentCheck(['-u', '-x'])
self.invalidArgumentCheck(['-u', 'file'])
self.invalidArgumentCheck(['-', 'file'])
self.invalidArgumentCheck(['-', '-idk'])
self.invalidArgumentCheck(['-a', '-idk'])
self.invalidArgumentCheck(['-b', '-idk'])
self.invalidArgumentCheck(['-u', '-idk'])
self.invalidArgumentCheck(['-u', 'db:nonExistingBranch'])
self.invalidArgumentCheck(['x'])
self.invalidArgumentCheck(['-x'])
self.invalidArgumentCheck(['-x', 'file.yaml'])
self.invalidArgumentCheck(['-x', '-y'])
self.invalidArgumentCheck(['-x', '-y', 'file.txt'])
self.invalidArgumentCheck(['text', '-y', '-file'])
self.invalidArgumentCheck(['-x', '-y', '-z', '-alpha', '-beta', '-gamma'])
self.invalidArgumentCheck(['db:master', 'db:devel'])
self.invalidArgumentCheck(['db:master', '-u'])
self.invalidArgumentCheck(['db:master'])
self.invalidArgumentCheck(['db:devel'])
def test_getScriptUsageLines_returnsPureListOfStrings(self):
lines = argumentParser.getScriptUsageLines()
checks.checkIfPureListOfStrings(lines)
self.assertTrue(len(lines) > 7)
|
CyberDani/personal-roadmap | webPage/generator/unitTests/cmd_test.py | import unittest
import sys
sys.path.append('..')
from modules import cmd
from modules import stringUtil
class CmdTests(unittest.TestCase):
def test_getOutputFromCommandNonSense(self):
with self.assertRaises(Exception):
cmd.getOutputFromCommand("")
with self.assertRaises(Exception):
cmd.getOutputFromCommand(12)
with self.assertRaises(Exception):
cmd.getOutputFromCommand()
with self.assertRaises(Exception):
cmd.getOutputFromCommand(None)
with self.assertRaises(Exception):
cmd.getOutputFromCommand(True)
def test_getOutputFromCommand_simpleExamples(self):
ans = cmd.getOutputFromCommand("cd")
self.assertTrue(stringUtil.rTrimNewLines(ans).endswith("\\webPage\\generator"))
ans = cmd.getOutputFromCommand("echo hello")
self.assertEqual(stringUtil.rTrimNewLines(ans), "hello")
ans = cmd.getOutputFromCommand("git --version")
self.assertTrue(ans.startswith("git version ")) |
CyberDani/personal-roadmap | webPage/generator/modules/argumentParser.py | import sys
from defTypes import buildType
from defTypes import dbBranchType
from modules import checks
from modules import db
# args excludes the name of the script
def parseArguments(args):
checks.checkIfPureListOfStrings(args)
argsSize = len(args)
invalidUsage = True
runUnitTests = False
backup = False
buildOption = buildType.BuildType.DO_NOT_BUILD
dbBranch = db.getCurrentDbBranch()
if argsSize == 0 or argsSize > 2:
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
if argsSize == 2:
secondArg = args[1]
if secondArg != "db:master" and secondArg != "db:devel":
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
if secondArg == "db:master":
dbBranch = dbBranchType.DbBranchType.MASTER
elif secondArg == "db:devel":
dbBranch = dbBranchType.DbBranchType.DEVEL
firstArg = args[0]
if firstArg == "-u":
invalidUsage = False
runUnitTests = True
backup = False
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
if firstArg == "-b":
invalidUsage = False
runUnitTests = True
backup = True
buildOption = buildType.BuildType.BUILD
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
if firstArg == "-rb":
invalidUsage = False
runUnitTests = True
backup = True
buildOption = buildType.BuildType.REBUILD
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
return invalidUsage, runUnitTests, backup, buildOption, dbBranch
def getCommandLineArgs():
#skip the first argument which contains the name of the script
return sys.argv[1:]
def getScriptUsageLines():
lines = [" Usage: {0} [command] [db-branch] \n".format(sys.argv[0]),
"Commands:",
"\t -u \t Run unit tests, nothing else happens",
"\t -b \t If all unit tests pass, backup current files and regenerate necessary files",
"\t -rb \t If all unit tests pass, backup current files and rebuild all files",
"",
"DB branch:",
" [!] Note: if not given it uses db:master for git:master and db:devel otherwise",
"",
"\t db:master \t Use the master branch on dbhub.io",
"\t db:devel \t Use the devel branch on dbhub.io"]
return lines
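# --- Illustrative usage sketch, not part of the original module ---
# A minimal example of how a build script might consume parseArguments();
# the printed labels are hypothetical and only show the returned 5-tuple.
if __name__ == "__main__":
  cliArgs = getCommandLineArgs()
  invalidUsage, runUnitTests, backup, buildOption, dbBranch = parseArguments(cliArgs)
  if invalidUsage:
    print("\n".join(getScriptUsageLines()))
  else:
    print("unit tests:", runUnitTests, "| backup:", backup,
          "| build:", buildOption.name, "| db branch:", dbBranch.name)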
|
CyberDani/personal-roadmap | webPage/generator/unitTests/filerw_test.py | import os
import sys
import unittest
sys.path.append('..')
from modules import filerw
from modules import htmlBuilder
class FileReadWriterTests(unittest.TestCase):
def test_fileExists_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.fileExists(file)
with self.assertRaises(Exception):
filerw.fileExists("")
with self.assertRaises(Exception):
filerw.fileExists()
with self.assertRaises(Exception):
filerw.fileExists(None)
with self.assertRaises(Exception):
filerw.fileExists(23)
with self.assertRaises(Exception):
filerw.fileExists(False)
def test_fileExists_example(self):
file = open("./unitTests/temp/testFile.txt", "w")
file.close()
self.assertTrue(filerw.fileExists("./unitTests/temp/testFile.txt"))
os.remove("./unitTests/temp/testFile.txt")
self.assertFalse(filerw.fileExists("./unitTests/temp/testFile.txt"))
def test_getLinesByFilePathWithEndingNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePathWithEndingNewLine_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesByFilePathWithEndingNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear\n")
self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLinesByFilePath_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesByFilePath_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
linesFromFile = filerw.getLinesByFilePath("./unitTests/temp/test.txt")
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear")
self.assertEqual(linesFromFile[1], "this is the tester")
def test_getLinesWithEndingNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLinesWithEndingNewLine_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY\n")
def test_getLinesWithEndingNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLinesWithEndingNewLine(file)
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear\n")
self.assertEqual(linesFromFile[1], "this is the tester\n")
def test_getLines_1line(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_1line_1emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("HEY\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 1)
self.assertEqual(linesFromFile[0], "HEY")
def test_getLines_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
file.write("hello dear\n")
file.write("this is the tester\n")
file.close()
file = open("./unitTests/temp/test.txt", "r")
linesFromFile = filerw.getLines(file)
self.assertEqual(len(linesFromFile), 2)
self.assertEqual(linesFromFile[0], "hello dear")
self.assertEqual(linesFromFile[1], "this is the tester")
def test_writeLinesPrefixedToFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile(file, "prefix", "asd")
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile(file, "prefix", None)
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile(file, 1, ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile(file, ["prefix"], ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile("./unitTests/temp/test.txt", "prefix", ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFile(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFile_emptyList(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [])
self.assertEqual(len(readLines), 0)
def test_writeLinesPrefixedToFile_oneEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", [""])
self.assertEqual(len(readLines), 1)
# empty line
self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoEmptyStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("== prefix ==", ["", ""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("[-]", ["\n"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "")
def test_writeLinesPrefixedToFile_twoNewLines(self):
readLines = self.helper_writeLinesPrefixedToFile("-=-", ["\n", "\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_NewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("line: ", ["\n", ""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_emptyStringAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("text: ", ["", "\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_oneString(self):
readLines = self.helper_writeLinesPrefixedToFile("Greetings: ", ["hey"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "Greetings: hey")
def test_writeLinesPrefixedToFile_twoStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("[text] ", ["hey", "Joe"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "[text] hey")
self.assertEqual(readLines[1], "[text] Joe")
def test_writeLinesPrefixedToFile_threeStrings(self):
readLines = self.helper_writeLinesPrefixedToFile("", ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "hey")
self.assertEqual(readLines[1], "magnificent")
self.assertEqual(readLines[2], "Joe")
def test_writeLinesPrefixedToFile_oneStringEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile(".", ["hey\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], ".hey")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFile_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile("# ", ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "# hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "# Joe")
self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFile_stringsAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFile(">", ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], ">hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], ">Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFile_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFile("\t\t", ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 6)
self.assertEqual(readLines[0], "\t\they")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "\t\tJoe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
def helper_writeLinesPrefixedToFile(self, prefix, lines):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesPrefixedToFile(file, prefix, lines)
file.close()
return filerw.getLinesByFilePath("./unitTests/temp/test.txt")
def test_writeLinesPrefixedToFileThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine(file, "prefix", None)
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
with self.assertRaises(Exception):
filerw.writeLinesPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyList(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "") # empty line
def test_writeLinesPrefixedToFileThenAppendNewLine_oneEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", [""])
self.assertEqual(len(readLines), 2)
# empty lines
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("== prefix ==", ["", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[-]", ["\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoNewLines(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("-=-", ["\n", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("line: ", ["\n", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("text: ", ["", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("Greetings: ", ["hey"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "Greetings: hey")
self.assertEqual(readLines[1], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("[text] ", ["hey", "Joe"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "[text] hey")
self.assertEqual(readLines[1], "[text] Joe")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_threeStrings(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("", ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "hey")
self.assertEqual(readLines[1], "magnificent")
self.assertEqual(readLines[2], "Joe")
self.assertEqual(readLines[3], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(".", ["hey\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], ".hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("# ", ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], "# hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "# Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine(">", ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 6)
self.assertEqual(readLines[0], ">hey")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], ">Joe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
def test_writeLinesPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeLinesPrefixedToFileThenAppendNewLine("\t\t", ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 7)
self.assertEqual(readLines[0], "\t\they")
self.assertEqual(readLines[1], "")
self.assertEqual(readLines[2], "\t\tJoe")
self.assertEqual(readLines[3], "")
self.assertEqual(readLines[4], "")
self.assertEqual(readLines[5], "")
self.assertEqual(readLines[6], "")
def helper_writeLinesPrefixedToFileThenAppendNewLine(self, prefix, lines):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesPrefixedToFileThenAppendNewLine(file, prefix, lines)
file.close()
return filerw.getLinesByFilePath("./unitTests/temp/test.txt")
def test_writeStringsPrefixedToFileThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", "asd")
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine(file, "prefix", None)
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine(file, 1, ["asd"])
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine(file, ["prefix"], ["asd"])
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine("./unitTests/temp/test.txt", "prefix", ["asd"])
with self.assertRaises(Exception):
filerw.writeStringsPrefixedToFileThenAppendNewLine(None, "prefix", ["asd"])
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyList(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, [])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, [""])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoEmptyStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoNewLines(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(5, ["\n", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_NewLineAndEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["\n", ""])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_emptyStringAndNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["", "\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\n")
self.assertEqual(readLines[1], "\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(2, ["hey"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\t\they\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "Joe"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\they\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_threeStrings(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(1, ["hey", "magnificent", "Joe"])
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\they\tmagnificent\tJoe\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_oneStringEndingWithNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(3, ["hey\n"])
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "\t\t\they\n")
self.assertEqual(readLines[1], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_twoStringsEndingWithNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n"])
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLine(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n"])
self.assertEqual(len(readLines), 4)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
self.assertEqual(readLines[3], "\n")
def test_writeStringsPrefixedToFileThenAppendNewLine_stringsAndNewLineAndEmptyString(self):
readLines = self.helper_writeStringsIndentedToFileThenAppendNewLine(4, ["hey\n", "Joe\n", "\n", ""])
self.assertEqual(len(readLines), 5)
self.assertEqual(readLines[0], "\t\t\t\they\n")
self.assertEqual(readLines[1], "\t\t\t\tJoe\n")
self.assertEqual(readLines[2], "\n")
self.assertEqual(readLines[3], "\n")
self.assertEqual(readLines[4], "\n")
def helper_writeStringsIndentedToFileThenAppendNewLine(self, indent, lines):
file = open("./unitTests/temp/test.txt", "w")
tabs = htmlBuilder.getEscapedTabs(indent)
filerw.writeStringsPrefixedToFileThenAppendNewLine(file, tabs, lines)
file.close()
return filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
def test_writeLinesToFileThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesToFileThenAppendNewLine(file, "asd")
with self.assertRaises(Exception):
filerw.writeLinesToFileThenAppendNewLine(file, 1)
with self.assertRaises(Exception):
filerw.writeLinesToFileThenAppendNewLine(file, None)
with self.assertRaises(Exception):
filerw.writeLinesToFileThenAppendNewLine("text.txt", ["firstLine"])
def test_writeLinesToFileThenAppendNewLine_noLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, [])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileThenAppendNewLine_emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, [""])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileThenAppendNewLine_1line(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileThenAppendNewLine_1lineEndingWithNewline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me\n"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me\n")
self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileThenAppendNewLine_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileThenAppendNewLine_3lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFileThenAppendNewLine(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", "asd")
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", 1)
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", None)
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePathThenAppendNewLine(file, ["firstLine"])
def test_writeLinesToFileByFilePathThenAppendNewLine_Noline(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_emptyLine_afterSomethingElse(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
["first", "second", "third", "fourth"])
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1line(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_1lineEndingWithNewline(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt", ["this is me\n"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me\n")
self.assertEqual(readLines[1], "\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_2lines(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
def test_writeLinesToFileByFilePathThenAppendNewLine_3lines(self):
filerw.writeLinesToFileByFilePathThenAppendNewLine("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123\n")
def test_writeLinesToFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesToFile(file, "asd")
with self.assertRaises(Exception):
filerw.writeLinesToFile(file, 1)
with self.assertRaises(Exception):
filerw.writeLinesToFile(file, None)
with self.assertRaises(Exception):
filerw.writeLinesToFile("text.txt", ["firstLine"])
def test_writeLinesToFile_Noline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, [])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_emptyLine(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, [""])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFile_1line(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFile_1lineEndingWithNewline(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me\n"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFile_2lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFile_3lines(self):
file = open("./unitTests/temp/test.txt", "w")
filerw.writeLinesToFile(file, ["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
file.close()
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123")
def test_writeLinesToFileByFilePath_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", "asd")
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", 1)
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", None)
with self.assertRaises(Exception):
filerw.writeLinesToFileByFilePath(file, ["firstLine"])
def test_writeLinesToFileByFilePath_noLine(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_noLine_afterSomeLines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["hey", "little", "man"])
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_emptyLine(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", [""])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 0)
def test_writeLinesToFileByFilePath_1line(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me")
def test_writeLinesToFileByFilePath_1lineEndingWithNewline(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt", ["this is me\n"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 1)
self.assertEqual(readLines[0], "this is me\n")
def test_writeLinesToFileByFilePath_2lines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 2)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor")
def test_writeLinesToFileByFilePath_3lines(self):
filerw.writeLinesToFileByFilePath("./unitTests/temp/test.txt",
["this is me:", "\t<NAME>, VIP executor", "tel: 0875432123"])
readLines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(readLines), 3)
self.assertEqual(readLines[0], "this is me:\n")
self.assertEqual(readLines[1], "\t<NAME>, VIP executor\n")
self.assertEqual(readLines[2], "tel: 0875432123")
def test_rTrimNewLines_nonSense(self):
with self.assertRaises(Exception):
filerw.rTrimNewLines()
with self.assertRaises(Exception):
filerw.rTrimNewLines("hello")
with self.assertRaises(Exception):
filerw.rTrimNewLines(None)
with self.assertRaises(Exception):
filerw.rTrimNewLines("hey\n")
with self.assertRaises(Exception):
filerw.rTrimNewLines(False)
with self.assertRaises(Exception):
filerw.rTrimNewLines(["one", None, "three"])
def test_rTrimNewLines_emptyList(self):
result = filerw.rTrimNewLines([])
self.assertEqual(len(result), 0)
def test_rTrimNewLines_oneElement(self):
result = filerw.rTrimNewLines(["Hello!"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello!")
result = filerw.rTrimNewLines(["\n\tHello!"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\n\tHello!")
result = filerw.rTrimNewLines(["\n\tHello!\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "\n\tHello!")
result = filerw.rTrimNewLines(["Hello\n\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello")
result = filerw.rTrimNewLines(["Hello\n\n\n\n\n\n\n"])
self.assertEqual(len(result), 1)
self.assertEqual(result[0], "Hello")
def test_rTrimNewLines_twoElements(self):
result = filerw.rTrimNewLines(["Hello", "hey\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "hey")
result = filerw.rTrimNewLines(["hey\n", "Hello\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[1], "Hello")
self.assertEqual(result[0], "hey")
result = filerw.rTrimNewLines(["Hello", "hey"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "hey")
result = filerw.rTrimNewLines(["Hello", "\n\n"])
self.assertEqual(len(result), 2)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "")
def test_rTrimNewLines_threeElements(self):
result = filerw.rTrimNewLines(["Hello\n", "hey", "hi\n\n"])
self.assertEqual(len(result), 3)
self.assertEqual(result[0], "Hello")
self.assertEqual(result[1], "hey")
self.assertEqual(result[2], "hi")
|
CyberDani/personal-roadmap | webPage/generator/modules/htmlBuilder.py | from modules import checks
from modules import filerw
# <html><head> [headWriter] </head><body> [bodyWriter] </body></html>
def buildIndexHtmlFile(indexHtmlHeadWriterFunction, indexHtmlBodyWriterFunction, settings):
htmlFile = settings.htmlOutputFile
settings.indentDepth = 2
htmlFile.write("<html>\n")
htmlFile.write("\t<head>\n")
indexHtmlHeadWriterFunction(settings)
htmlFile.write("\t</head>\n")
htmlFile.write("\t<body>\n")
indexHtmlBodyWriterFunction(settings)
htmlFile.write("\t</body>\n")
htmlFile.write("</html>\n")
# file1 += file2
def includeFileThenAppendNewLine(htmlFile, includeFilePath, indentDepth):
lines = filerw.getLinesByFilePathWithEndingNewLine(includeFilePath)
tabs = getEscapedTabs(indentDepth)
filerw.writeStringsPrefixedToFileThenAppendNewLine(htmlFile, tabs, lines)
# file1 += <htmlTag> file2 </htmlTag>
def includeFileSurroundedByHtmlTagThenAppendNewLine(htmlFile, includeFilePath, htmlTag, htmlTagOption, indentDepth):
tabs = getEscapedTabs(indentDepth)
htmlFile.write(tabs + getOpenedHtmlTag(htmlTag, htmlTagOption) + "\n")
fileLines = filerw.getLinesByFilePath(includeFilePath)
filerw.writeLinesPrefixedToFile(htmlFile, tabs + "\t", fileLines)
htmlFile.write(tabs + getClosedHtmlTag(htmlTag) + "\n")
# <script src=".js" /> -> file
def addJsScriptSrcToHtmlOutputFile(htmlFile, indentDepth, url, integrity=None, crossorigin=None, referrerpolicy=None):
lines = getJsScriptSrc(indentDepth, url, integrity, crossorigin, referrerpolicy)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, lines)
# <link href=".css" /> -> file
def addCssLinkHrefToHtmlOutputFile(htmlFile, indentDepth, url, integrity=None, crossorigin=None, referrerpolicy=None):
lines = getCssLinkHref(indentDepth, url, integrity, crossorigin, referrerpolicy)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, lines)
# <br\> <br\> <br\> -> file
def addHtmlNewLineToFile(htmlFile, indentDepth, nrOfNewLines=1):
newLinesString = getHtmlNewLines(indentDepth, nrOfNewLines)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, [newLinesString])
# <title> Page title </title> -> file
def addTitleToHtmlOutputFile(htmlFile, titleString, indentDepth):
htmlTitle = getHtmlTitle(titleString, indentDepth)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, [htmlTitle])
# <link rel="icon" href="favicon.png"> -> file
def addFaviconToHtmlOutputFile(htmlFile, faviconPath, indentDepth):
htmlFavicon = getHtmlFavicon(faviconPath, indentDepth)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, [htmlFavicon])
# <meta name="viewport" content="width=device-width, initial-scale=1.0"/> -> file
def addMetaScreenOptimizedForMobileToHtmlOutputFile(htmlFile, indentDepth):
metaTag = getMetaScreenOptimizedForMobile(indentDepth)
filerw.writeLinesToFileThenAppendNewLine(htmlFile, [metaTag])
# <script src=".js" />
def getJsScriptSrc(indentDepth, url, integrity=None, crossorigin=None, referrerpolicy=None):
# "a.io/s.js" -> length 9
checks.checkIfString(url, 9, 500)
checks.checkIfAllNoneOrString([integrity, crossorigin, referrerpolicy], 5, 200)
tabs = getEscapedTabs(indentDepth)
result = [tabs + "<script src=\"" + url + "\""]
if integrity is None:
result[0] += "></script>"
return result
tabs += "\t"
# integrity deserves its own line because usually it is a long string
result.append(tabs + "integrity=\"" + integrity + "\"")
result.append(tabs + "crossorigin=\"" + crossorigin + "\" referrerpolicy=\"" + referrerpolicy + "\"></script>")
return result
# <link href=".css" />
def getCssLinkHref(indentDepth, url, integrity=None, crossorigin=None, referrerpolicy=None):
# "a.io/s.css" -> length 10
checks.checkIfString(url, 10, 500)
checks.checkIfAllNoneOrString([integrity, crossorigin, referrerpolicy], 5, 200)
tabs = getEscapedTabs(indentDepth)
result = [tabs + "<link href=\"" + url + "\""]
tabs += "\t"
if integrity is None:
if len(url) > 95:
result.append(tabs + "rel=\"stylesheet\" />")
else:
result[0] += " rel=\"stylesheet\" />"
return result
# integrity deserves its own line because usually it is a long string
result.append(tabs + "integrity=\"" + integrity + "\"")
result.append(tabs + "rel=\"stylesheet\" crossorigin=\"" + crossorigin
+ "\" referrerpolicy=\"" + referrerpolicy + "\" />")
return result
# <link rel="icon" href="favicon.png">
def getHtmlFavicon(faviconPath, indentDepth):
checks.checkIntIsBetween(indentDepth, 1, 150)
checks.checkIfString(faviconPath, 3, 300)
result = getEscapedTabs(indentDepth)
result += "<link rel=\"icon\" href=\"" + faviconPath + "\">"
return result
# <title> page title </title>
def getHtmlTitle(titleString, indentDepth):
checks.checkIntIsBetween(indentDepth, 1, 150)
checks.checkIfString(titleString, 2, 300)
result = getEscapedTabs(indentDepth)
result += "<title>" + titleString + "</title>"
return result
# <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
def getMetaScreenOptimizedForMobile(indentDepth):
tabs = getEscapedTabs(indentDepth)
metaTag = tabs + "<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"/>"
return metaTag
# <br\> <br\> <br\>
def getHtmlNewLines(indentDepth, nrOfNewLines = 1):
checks.checkIntIsBetween(nrOfNewLines, 1, 50)
result = getEscapedTabs(indentDepth)
for i in range(nrOfNewLines):
result += "<br\\>"
if i != nrOfNewLines - 1:
result += " "
return result
# <htmlTag options>
def getOpenedHtmlTag(htmlTag, options = ""):
checks.checkIfString(htmlTag, 1, 100)
checks.checkIfString(options, 0, 500)
checks.checkIfStringIsAlphaNumerical(htmlTag)
result = "<" + htmlTag
if len(options) > 0:
result += " " + options
result += ">"
return result
def getClosedHtmlTag(htmlTag):
checks.checkIfString(htmlTag, 1, 100)
checks.checkIfStringIsAlphaNumerical(htmlTag)
return "</" + htmlTag + ">"
# \t\t\t
def getEscapedTabs(indentDepth):
checks.checkIntIsBetween(indentDepth, 1, 50)
ans = ""
for i in range(indentDepth):
ans += "\t"
return ans
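# --- Illustrative usage sketch, not part of the original module ---
# Shows the strings produced by a few of the pure helpers above; the URL is a
# made-up example, and the expected output is noted in the comments.
if __name__ == "__main__":
  print(getOpenedHtmlTag("div", "class=\"row\""))  # <div class="row">
  print(getClosedHtmlTag("div"))  # </div>
  print(getHtmlTitle("My page", 2))  # \t\t<title>My page</title>
  for line in getJsScriptSrc(1, "https://example.com/app.js"):
    print(line)  # \t<script src="https://example.com/app.js"></script>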
|
CyberDani/personal-roadmap | webPage/generator/defTypes/buildType.py | from enum import Enum
class BuildType(Enum):
DO_NOT_BUILD = 0
BUILD = 1
REBUILD = 2 |
CyberDani/personal-roadmap | webPage/generator/defTypes/dbBranchType.py | from enum import Enum
class DbBranchType(Enum):
MASTER = 0
DEVEL = 1
|
CyberDani/personal-roadmap | webPage/generator/modules/counter.py | from modules import checks
class SimpleCounter:
def __init__(self, startCounter):
checks.checkIntIsBetween(startCounter, 0, 1000000)
self.counter = startCounter
def getNextInt(self):
counterCp = self.counter
self.counter += 1
return counterCp
def getNextMessage(self, string):
checks.checkIfString(string, 0, 500)
ans = "[{0}] {1}".format(self.counter, string)
self.counter += 1
return ans
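# --- Illustrative usage sketch, not part of the original module ---
# SimpleCounter hands out consecutive integers; getNextMessage() prefixes the
# given text with the current value before incrementing.
if __name__ == "__main__":
  steps = SimpleCounter(1)
  print(steps.getNextInt())  # 1
  print(steps.getNextMessage("second step"))  # [2] second step
  print(steps.getNextMessage("third step"))  # [3] third step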
|
CyberDani/personal-roadmap | webPage/generator/unitTests/webReq_test.py | import os
import sys
import unittest
sys.path.append('..')
from modules import webReq
from modules import filerw
class WebReqTests(unittest.TestCase):
def test_getstatusCodeEncodingAndHtmlFromUrl_nonSense(self):
with self.assertRaises(Exception):
webReq.getstatusCodeEncodingAndHtmlFromUrl("")
with self.assertRaises(Exception):
webReq.getstatusCodeEncodingAndHtmlFromUrl()
with self.assertRaises(Exception):
webReq.getstatusCodeEncodingAndHtmlFromUrl(None)
with self.assertRaises(Exception):
webReq.getstatusCodeEncodingAndHtmlFromUrl(False)
with self.assertRaises(Exception):
webReq.getstatusCodeEncodingAndHtmlFromUrl("123")
def test_getstatusCodeEncodingAndHtmlFromUrl_incorrectUrl(self):
with self.assertRaises(Exception):
statusCode, encoding, html = webReq.getstatusCodeEncodingAndHtmlFromUrl("http://www.asdsasdsadasdas.qweqwe.com")
def test_getstatusCodeEncodingAndHtmlFromUrl_correctUrl(self):
statusCode, encoding, html = webReq.getstatusCodeEncodingAndHtmlFromUrl("https://www.youtube.com")
self.assertEqual(statusCode, 200)
self.assertEqual(encoding, "utf-8")
self.assertTrue(len(html) > 300)
def test_getstatusCodeEncodingAndHtmlFromUrl_404(self):
statusCode, encoding, html = webReq.getstatusCodeEncodingAndHtmlFromUrl("https://www.google.com/asdfeogeroiyfgwieuapfbi")
self.assertEqual(statusCode, 404)
self.assertEqual(encoding.lower(), "utf-8")
self.assertTrue(len(html) > 300)
def test_downloadFromUrlToFile_nonSense(self):
file = open("./unitTests/temp/test.txt", "wb")
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200("io", "./unitTests/temp/logo.binary")
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200("https://cyberdani.github.io/Programming-puzzle-pieces/webPage/images/Logo_text.png", file)
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200("https://cyberdani.github.io/Programming-puzzle-pieces/webPage/images/Logo_text.png", "")
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200("https://cyberdani.github.io/Programming-puzzle-pieces/webPage/images/Logo_text.png", None)
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200(None, "./unitTests/temp/logo.binary")
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200(None, None)
self.assertFalse(filerw.fileExists("./unitTests/temp/logo.binary"))
def test_downloadFromUrlToFile_incorrectUrl(self):
if filerw.fileExists("./unitTests/temp/download.binary"):
os.remove("./unitTests/temp/download.binary")
with self.assertRaises(Exception):
statusCode, encoding, html = webReq.downloadFromUrlToFileIfStatusIs200("https://www.google.com/asdfeogeroiyfgwieuapfbi", "./unitTests/temp/download.binary")
self.assertFalse(filerw.fileExists("./unitTests/temp/download.binary"))
def test_downloadFromUrlToFile_404(self):
with self.assertRaises(Exception):
webReq.downloadFromUrlToFileIfStatusIs200("https://www.google.com/asdfeogeroiyfgwieuapfbi", "./unitTests/temp/download.binary")
def test_downloadFromUrlToFile_correctUrl200(self):
webReq.downloadFromUrlToFileIfStatusIs200("https://cyberdani.github.io/Programming-puzzle-pieces/webPage/images/Logo_text.png", "./unitTests/temp/download.png")
self.assertTrue(filerw.fileExists("./unitTests/temp/download.png"))
size1 = os.path.getsize("./unitTests/temp/download.png") / 1024
self.assertTrue(size1 > 15)
self.assertTrue(size1 < 150)
webReq.downloadFromUrlToFileIfStatusIs200("https://cyberdani.github.io/Programming-puzzle-pieces/webPage/images/Logo_text.png", "./unitTests/temp/download.png")
self.assertTrue(filerw.fileExists("./unitTests/temp/download.png"))
size2 = os.path.getsize("./unitTests/temp/download.png") / 1024
self.assertEqual(size1, size2)
os.remove("./unitTests/temp/download.png") |
CyberDani/personal-roadmap | webPage/generator/modules/cmd.py | import subprocess
from modules import checks
def getOutputFromCommand(command):
checks.checkIfString(command, 1, 800)
result = subprocess.run(command, stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8')
return result
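# --- Illustrative usage sketch, not part of the original module ---
# getOutputFromCommand() runs a shell command and returns its stdout decoded as
# UTF-8; both commands below are examples and assumed to exist on the machine.
if __name__ == "__main__":
  print(getOutputFromCommand("echo hello"))  # hello (plus a trailing newline)
  print(getOutputFromCommand("git --version"))  # e.g. git version 2.x.y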
|
CyberDani/personal-roadmap | webPage/generator/unitTests/git_test.py | import sys
import unittest
sys.path.append('..')
from modules import git
from modules import cmd
from modules import stringUtil
from modules import filerw
class GitUtilTests(unittest.TestCase):
def test_getRepoRootDirectory(self):
gitRoot = git.getRepoRootDirectory()
currentPath = stringUtil.rTrimNewLines(cmd.getOutputFromCommand("cd"))
gitRoot = gitRoot.replace("\\", "/")
currentPath = currentPath.replace("\\", "/")
self.assertTrue(currentPath.startswith(gitRoot))
self.assertTrue(filerw.fileExists(gitRoot + "/.git/HEAD"))
def test_getCurrentBranch(self):
currentBranch = git.getCurrentBranch()
gitBranchCmd = stringUtil.rTrimNewLines(cmd.getOutputFromCommand("git branch --show-current"))
self.assertEqual(currentBranch, gitBranchCmd)
|
CyberDani/personal-roadmap | webPage/generator/modules/db.py | from defTypes import dbBranchType
from modules import checks
from modules import git
# git:master -> MASTER, otherwise DEVEL
def getDbBranchByGitBranch(gitBranch):
checks.checkIfString(gitBranch, 1, 300)
if gitBranch == "master":
return dbBranchType.DbBranchType.MASTER
return dbBranchType.DbBranchType.DEVEL
# git:master -> MASTER, otherwise DEVEL
def getCurrentDbBranch():
gitBranch = git.getCurrentBranch()
return getDbBranchByGitBranch(gitBranch) |
CyberDani/personal-roadmap | webPage/generator/unitTests4unitTests/fail_x_group1.py | import unittest
class Fail1Group1Tests(unittest.TestCase):
def test_quickPass1(self):
self.assertTrue(True)
def test_quickPass2(self):
self.assertTrue(True)
def test_quickFail1(self):
self.fail(":(")
def test_quickPass3(self):
self.assertTrue(True)
|
CyberDani/personal-roadmap | webPage/generator/unitTests/webLibs_test.py | import sys
import unittest
sys.path.append('..')
from modules import filerw
from modules import webLibs
class WebLibsTests(unittest.TestCase):
def test_addFontAwesome_v611_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addFontAwesome_v611(file, None)
with self.assertRaises(Exception):
webLibs.addFontAwesome_v611(file, -1)
with self.assertRaises(Exception):
webLibs.addFontAwesome_v611(file, 150)
with self.assertRaises(Exception):
webLibs.addFontAwesome_v611("index.html", 2)
with self.assertRaises(Exception):
webLibs.addFontAwesome_v611(None, 3)
def test_addFontAwesome_v611_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addFontAwesome_v611(file, 2)
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 6)
self.assertEqual(lines[0], "\t\t<link href=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/css/all.min.css\"\n")
self.assertEqual(lines[1], "\t\t\tintegrity=\"sha512-KfkfwYDsLkIlwQp6LFnl8zNdLGxu9YAA1QvwINks4PhcElQSvqcyVLLD9aMhXd13uQjoXtEKNosOWaZqXgel0g==\"\n")
self.assertEqual(lines[2], "\t\t\trel=\"stylesheet\" crossorigin=\"anonymous\" referrerpolicy=\"no-referrer\" />\n")
self.assertEqual(lines[3], "\t\t<script src=\"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/js/all.min.js\"\n")
self.assertEqual(lines[4], "\t\t\tintegrity=\"<KEY>n")
self.assertEqual(lines[5], "\t\t\tcrossorigin=\"anonymous\" referrerpolicy=\"no-referrer\"></script>\n")
def test_addJquery_v360_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addJquery_v360(file, None)
with self.assertRaises(Exception):
webLibs.addJquery_v360(file, -1)
with self.assertRaises(Exception):
webLibs.addJquery_v360(file, 150)
with self.assertRaises(Exception):
webLibs.addJquery_v360("index.html", 2)
with self.assertRaises(Exception):
webLibs.addJquery_v360(None, 3)
def test_addJquery_v360_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addJquery_v360(file, 3)
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0], "\t\t\t<script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery/3.6.0/jquery.min.js\"\n")
self.assertEqual(lines[1], "\t\t\t\tintegrity=\"sha512-894YE6QWD5I59HgZOGReFYm4dnWc1Qt5NtvYSaNcOP+u1T9qYdvdihz0PPSiiqn/+/3e7Jo4EaG7TubfWGUrMQ==\"\n")
self.assertEqual(lines[2], "\t\t\t\tcrossorigin=\"anonymous\" referrerpolicy=\"no-referrer\"></script>\n")
def test_addMaterialize_v110_alpha_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addMaterialize_v110_alpha(file, None)
with self.assertRaises(Exception):
webLibs.addMaterialize_v110_alpha(file, -1)
with self.assertRaises(Exception):
webLibs.addMaterialize_v110_alpha(file, 150)
with self.assertRaises(Exception):
webLibs.addMaterialize_v110_alpha("index.html", 2)
with self.assertRaises(Exception):
webLibs.addMaterialize_v110_alpha(None, 3)
def test_addMaterialize_v110_alpha_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addMaterialize_v110_alpha(file, 1)
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0], "\t<link href=\"https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/css/materialize.min.css\"\n")
self.assertEqual(lines[1], "\t\trel=\"stylesheet\" />\n")
self.assertEqual(lines[2], "\t<script src=\"https://cdn.jsdelivr.net/npm/@materializecss/materialize@1.1.0-alpha/dist/js/materialize.min.js\"></script>\n")
def test_addGoogleIcons_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addGoogleIcons(file, None)
with self.assertRaises(Exception):
webLibs.addGoogleIcons(file, -1)
with self.assertRaises(Exception):
webLibs.addGoogleIcons(file, 150)
with self.assertRaises(Exception):
webLibs.addGoogleIcons("index.html", 2)
with self.assertRaises(Exception):
webLibs.addGoogleIcons(None, 3)
def test_addGoogleIcons_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addGoogleIcons(file, 4)
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], "\t\t\t\t<link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\" />\n")
def test_addJQueryLoadingOverlay_v217_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addJQueryLoadingOverlay_v217(file, None)
with self.assertRaises(Exception):
webLibs.addJQueryLoadingOverlay_v217(file, -1)
with self.assertRaises(Exception):
webLibs.addJQueryLoadingOverlay_v217(file, 150)
with self.assertRaises(Exception):
webLibs.addJQueryLoadingOverlay_v217("index.html", 2)
with self.assertRaises(Exception):
webLibs.addJQueryLoadingOverlay_v217(None, 3)
def test_addJQueryLoadingOverlay_v217_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addJQueryLoadingOverlay_v217(file, 5)
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0], "\t\t\t\t\t<script src=\"https://cdn.jsdelivr.net/npm/gasparesganga-jquery-loading-overlay@2.1.7/dist/loadingoverlay.min.js\"></script>\n")
def test_addGoogleFont_nonSense(self):
file = open("./unitTests/temp/test.txt", "w")
with self.assertRaises(Exception):
webLibs.addGoogleFont(file, None, "Dani Sans")
with self.assertRaises(Exception):
webLibs.addGoogleFont(file, -1, "Dani Sans")
with self.assertRaises(Exception):
webLibs.addGoogleFont(file, 150, "Dani Sans")
with self.assertRaises(Exception):
webLibs.addGoogleFont("index.html", 2, "Dani Sans")
with self.assertRaises(Exception):
webLibs.addGoogleFont(None, 3, "Dani Sans")
with self.assertRaises(Exception):
webLibs.addGoogleFont(file, 3, 24)
def test_addGoogleFont_normalCase(self):
file = open("./unitTests/temp/test.txt", "w")
webLibs.addGoogleFont(file, 6, "?family=Heyho+Joe:wght@500&display=something")
file.close()
lines = filerw.getLinesByFilePathWithEndingNewLine("./unitTests/temp/test.txt")
self.assertEqual(len(lines), 3)
self.assertEqual(lines[0], "\t\t\t\t\t\t<link rel=\"preconnect\" href=\"https://fonts.googleapis.com\">\n")
self.assertEqual(lines[1], "\t\t\t\t\t\t<link rel=\"preconnect\" href=\"https://fonts.gstatic.com\" crossorigin>\n")
self.assertEqual(lines[2], "\t\t\t\t\t\t<link href=\"https://fonts.googleapis.com/css2?family=Heyho+Joe:wght@500&display=something\" rel=\"stylesheet\">\n")
|
CyberDani/personal-roadmap | webPage/generator/unitTests4unitTests/pass_x_group1.py | import unittest
class Pass1Group1Tests(unittest.TestCase):
def test_quickPass1(self):
self.assertTrue(True)
def test_quickPass2(self):
self.assertTrue(True)
def test_quickPass3(self):
self.assertTrue(True)
|
CyberDani/personal-roadmap | webPage/generator/unitTests/buildType_test.py | import unittest
import sys
sys.path.append('..')
from defTypes import buildType
class BuildTypeTests(unittest.TestCase):
def test_values(self):
buildType.BuildType.DO_NOT_BUILD
buildType.BuildType.BUILD
buildType.BuildType.REBUILD
def test_validateLength(self):
self.assertEqual(len(buildType.BuildType), 3) |
CyberDani/personal-roadmap | webPage/generator/modules/webReq.py | import requests
from modules import checks
def getstatusCodeEncodingAndHtmlFromUrl(url):
checks.checkIfString(url, 9, 500)
r = requests.get(url)
return r.status_code, r.encoding, r.text
def downloadFromUrlToFileIfStatusIs200(url, filePath):
checks.checkIfString(url, 9, 500)
checks.checkIfString(filePath, 3, 300)
r = requests.get(url, stream=True)
if r.status_code != 200:
raise Exception('Status Code {0} returned, download prevented!'.format(r.status_code))
with open(filePath, 'wb') as fd:
for chunk in r.iter_content(chunk_size=512000): # 500 kb chunks
fd.write(chunk) |